xref: /linux/drivers/net/wireless/intel/ipw2x00/ipw2200.c (revision da1d9caf95def6f0320819cf941c9fd1069ba9e1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 
4   Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 
6   802.11 status code portion of this file from ethereal-0.10.6:
7     Copyright 2000, Axis Communications AB
8     Ethereal - Network traffic analyzer
9     By Gerald Combs <gerald@ethereal.com>
10     Copyright 1998 Gerald Combs
11 
12 
13   Contact Information:
14   Intel Linux Wireless <ilw@linux.intel.com>
15   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
16 
17 ******************************************************************************/
18 
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <net/cfg80211-wext.h>
22 #include "ipw2200.h"
23 #include "ipw.h"
24 
25 
26 #ifndef KBUILD_EXTMOD
27 #define VK "k"
28 #else
29 #define VK
30 #endif
31 
32 #ifdef CONFIG_IPW2200_DEBUG
33 #define VD "d"
34 #else
35 #define VD
36 #endif
37 
38 #ifdef CONFIG_IPW2200_MONITOR
39 #define VM "m"
40 #else
41 #define VM
42 #endif
43 
44 #ifdef CONFIG_IPW2200_PROMISCUOUS
45 #define VP "p"
46 #else
47 #define VP
48 #endif
49 
50 #ifdef CONFIG_IPW2200_RADIOTAP
51 #define VR "r"
52 #else
53 #define VR
54 #endif
55 
56 #ifdef CONFIG_IPW2200_QOS
57 #define VQ "q"
58 #else
59 #define VQ
60 #endif
61 
62 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
63 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
64 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
65 #define DRV_VERSION     IPW2200_VERSION
66 
67 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
68 
69 MODULE_DESCRIPTION(DRV_DESCRIPTION);
70 MODULE_VERSION(DRV_VERSION);
71 MODULE_AUTHOR(DRV_COPYRIGHT);
72 MODULE_LICENSE("GPL");
73 MODULE_FIRMWARE("ipw2200-ibss.fw");
74 #ifdef CONFIG_IPW2200_MONITOR
75 MODULE_FIRMWARE("ipw2200-sniffer.fw");
76 #endif
77 MODULE_FIRMWARE("ipw2200-bss.fw");
78 
79 static int cmdlog = 0;
80 static int debug = 0;
81 static int default_channel = 0;
82 static int network_mode = 0;
83 
84 static u32 ipw_debug_level;
85 static int associate;
86 static int auto_create = 1;
87 static int led_support = 1;
88 static int disable = 0;
89 static int bt_coexist = 0;
90 static int hwcrypto = 0;
91 static int roaming = 1;
92 static const char ipw_modes[] = {
93 	'a', 'b', 'g', '?'
94 };
95 static int antenna = CFG_SYS_ANTENNA_BOTH;
96 
97 #ifdef CONFIG_IPW2200_PROMISCUOUS
98 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
99 #endif
100 
101 static struct ieee80211_rate ipw2200_rates[] = {
102 	{ .bitrate = 10 },
103 	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
104 	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
105 	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
106 	{ .bitrate = 60 },
107 	{ .bitrate = 90 },
108 	{ .bitrate = 120 },
109 	{ .bitrate = 180 },
110 	{ .bitrate = 240 },
111 	{ .bitrate = 360 },
112 	{ .bitrate = 480 },
113 	{ .bitrate = 540 }
114 };
115 
116 #define ipw2200_a_rates		(ipw2200_rates + 4)
117 #define ipw2200_num_a_rates	8
118 #define ipw2200_bg_rates	(ipw2200_rates + 0)
119 #define ipw2200_num_bg_rates	12
120 
121 /* Ugly macro to convert literal channel numbers into their MHz equivalents.
122  * There are certainly some conditions that will break this (like feeding it '30'),
123  * but they shouldn't arise since nothing talks on channel 30. */
124 #define ieee80211chan2mhz(x) \
125 	(((x) <= 14) ? \
126 	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
127 	((x) + 1000) * 5)
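/* Worked examples (the values follow directly from the macro above and the
 * 802.11 channel plan):
 *   ieee80211chan2mhz(1)  -> (1 * 5) + 2407  = 2412 MHz
 *   ieee80211chan2mhz(14) -> 2484 MHz (special-cased)
 *   ieee80211chan2mhz(36) -> (36 + 1000) * 5 = 5180 MHz
 */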
128 
129 #ifdef CONFIG_IPW2200_QOS
130 static int qos_enable = 0;
131 static int qos_burst_enable = 0;
132 static int qos_no_ack_mask = 0;
133 static int burst_duration_CCK = 0;
134 static int burst_duration_OFDM = 0;
135 
136 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
137 	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
138 	 QOS_TX3_CW_MIN_OFDM},
139 	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
140 	 QOS_TX3_CW_MAX_OFDM},
141 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
142 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
143 	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
144 	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
145 };
146 
147 static struct libipw_qos_parameters def_qos_parameters_CCK = {
148 	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
149 	 QOS_TX3_CW_MIN_CCK},
150 	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
151 	 QOS_TX3_CW_MAX_CCK},
152 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
153 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
154 	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
155 	 QOS_TX3_TXOP_LIMIT_CCK}
156 };
157 
158 static struct libipw_qos_parameters def_parameters_OFDM = {
159 	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
160 	 DEF_TX3_CW_MIN_OFDM},
161 	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
162 	 DEF_TX3_CW_MAX_OFDM},
163 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
164 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
165 	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
166 	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
167 };
168 
169 static struct libipw_qos_parameters def_parameters_CCK = {
170 	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
171 	 DEF_TX3_CW_MIN_CCK},
172 	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
173 	 DEF_TX3_CW_MAX_CCK},
174 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
175 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
176 	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
177 	 DEF_TX3_TXOP_LIMIT_CCK}
178 };
179 
180 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
181 
182 static int from_priority_to_tx_queue[] = {
183 	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
184 	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
185 };
186 
187 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
188 
189 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
190 				       *qos_param);
191 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
192 				     *qos_param);
193 #endif				/* CONFIG_IPW2200_QOS */
194 
195 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
196 static void ipw_remove_current_network(struct ipw_priv *priv);
197 static void ipw_rx(struct ipw_priv *priv);
198 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
199 				struct clx2_tx_queue *txq, int qindex);
200 static int ipw_queue_reset(struct ipw_priv *priv);
201 
202 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
203 			     int len, int sync);
204 
205 static void ipw_tx_queue_free(struct ipw_priv *);
206 
207 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
208 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
209 static void ipw_rx_queue_replenish(void *);
210 static int ipw_up(struct ipw_priv *);
211 static void ipw_bg_up(struct work_struct *work);
212 static void ipw_down(struct ipw_priv *);
213 static void ipw_bg_down(struct work_struct *work);
214 static int ipw_config(struct ipw_priv *);
215 static int init_supported_rates(struct ipw_priv *priv,
216 				struct ipw_supported_rates *prates);
217 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
218 static void ipw_send_wep_keys(struct ipw_priv *, int);
219 
220 static int snprint_line(char *buf, size_t count,
221 			const u8 * data, u32 len, u32 ofs)
222 {
223 	int out, i, j, l;
224 	char c;
225 
226 	out = scnprintf(buf, count, "%08X", ofs);
227 
228 	for (l = 0, i = 0; i < 2; i++) {
229 		out += scnprintf(buf + out, count - out, " ");
230 		for (j = 0; j < 8 && l < len; j++, l++)
231 			out += scnprintf(buf + out, count - out, "%02X ",
232 					data[(i * 8 + j)]);
233 		for (; j < 8; j++)
234 			out += scnprintf(buf + out, count - out, "   ");
235 	}
236 
237 	out += scnprintf(buf + out, count - out, " ");
238 	for (l = 0, i = 0; i < 2; i++) {
239 		out += scnprintf(buf + out, count - out, " ");
240 		for (j = 0; j < 8 && l < len; j++, l++) {
241 			c = data[(i * 8 + j)];
242 			if (!isascii(c) || !isprint(c))
243 				c = '.';
244 
245 			out += scnprintf(buf + out, count - out, "%c", c);
246 		}
247 
248 		for (; j < 8; j++)
249 			out += scnprintf(buf + out, count - out, " ");
250 	}
251 
252 	return out;
253 }
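/* Illustrative only: for 16 printable bytes "ABCDEFGHIJKLMNOP" at offset 0x10,
 * snprint_line() emits a hex-dump line shaped roughly like
 *   00000010 41 42 43 44 45 46 47 48  49 4A 4B 4C 4D 4E 4F 50   ABCDEFGH IJKLMNOP
 * i.e. the offset, two groups of eight hex bytes, then the same bytes as
 * characters with anything non-printable replaced by '.'.
 */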
254 
255 static void printk_buf(int level, const u8 * data, u32 len)
256 {
257 	char line[81];
258 	u32 ofs = 0;
259 	if (!(ipw_debug_level & level))
260 		return;
261 
262 	while (len) {
263 		snprint_line(line, sizeof(line), &data[ofs],
264 			     min(len, 16U), ofs);
265 		printk(KERN_DEBUG "%s\n", line);
266 		ofs += 16;
267 		len -= min(len, 16U);
268 	}
269 }
270 
271 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
272 {
273 	size_t out = size;
274 	u32 ofs = 0;
275 	int total = 0;
276 
277 	while (size && len) {
278 		out = snprint_line(output, size, &data[ofs],
279 				   min_t(size_t, len, 16U), ofs);
280 
281 		ofs += 16;
282 		output += out;
283 		size -= out;
284 		len -= min_t(size_t, len, 16U);
285 		total += out;
286 	}
287 	return total;
288 }
289 
290 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
291 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
292 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
293 
294 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
295 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
296 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
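/* Access-model note (summarising the helpers that follow): registers and SRAM
 * in the low 4K of the mapped space are touched directly with ipw_read{8,16,32}
 * and ipw_write{8,16,32}, while anything above 4K goes through the indirect
 * IPW_INDIRECT_ADDR / IPW_INDIRECT_DATA window via ipw_read_reg{8,32} and
 * ipw_write_reg{8,16,32}.
 */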
297 
298 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
299 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
300 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
301 {
302 	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
303 		     __LINE__, (u32) (b), (u32) (c));
304 	_ipw_write_reg8(a, b, c);
305 }
306 
307 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
308 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
309 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
310 {
311 	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
312 		     __LINE__, (u32) (b), (u32) (c));
313 	_ipw_write_reg16(a, b, c);
314 }
315 
316 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
317 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
318 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
319 {
320 	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
321 		     __LINE__, (u32) (b), (u32) (c));
322 	_ipw_write_reg32(a, b, c);
323 }
324 
325 /* 8-bit direct write (low 4K) */
326 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
327 		u8 val)
328 {
329 	writeb(val, ipw->hw_base + ofs);
330 }
331 
332 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
333 #define ipw_write8(ipw, ofs, val) do { \
334 	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
335 			__LINE__, (u32)(ofs), (u32)(val)); \
336 	_ipw_write8(ipw, ofs, val); \
337 } while (0)
338 
339 /* 16-bit direct write (low 4K) */
340 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
341 		u16 val)
342 {
343 	writew(val, ipw->hw_base + ofs);
344 }
345 
346 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write16(ipw, ofs, val) do { \
348 	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
349 			__LINE__, (u32)(ofs), (u32)(val)); \
350 	_ipw_write16(ipw, ofs, val); \
351 } while (0)
352 
353 /* 32-bit direct write (low 4K) */
354 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
355 		u32 val)
356 {
357 	writel(val, ipw->hw_base + ofs);
358 }
359 
360 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write32(ipw, ofs, val) do { \
362 	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
363 			__LINE__, (u32)(ofs), (u32)(val)); \
364 	_ipw_write32(ipw, ofs, val); \
365 } while (0)
366 
367 /* 8-bit direct read (low 4K) */
368 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
369 {
370 	return readb(ipw->hw_base + ofs);
371 }
372 
373 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
374 #define ipw_read8(ipw, ofs) ({ \
375 	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
376 			(u32)(ofs)); \
377 	_ipw_read8(ipw, ofs); \
378 })
379 
380 /* 16-bit direct read (low 4K) */
381 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
382 {
383 	return readw(ipw->hw_base + ofs);
384 }
385 
386 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
387 #define ipw_read16(ipw, ofs) ({ \
388 	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
389 			(u32)(ofs)); \
390 	_ipw_read16(ipw, ofs); \
391 })
392 
393 /* 32-bit direct read (low 4K) */
394 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
395 {
396 	return readl(ipw->hw_base + ofs);
397 }
398 
399 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
400 #define ipw_read32(ipw, ofs) ({ \
401 	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
402 			(u32)(ofs)); \
403 	_ipw_read32(ipw, ofs); \
404 })
405 
406 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
407 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
408 #define ipw_read_indirect(a, b, c, d) ({ \
409 	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
410 			__LINE__, (u32)(b), (u32)(d)); \
411 	_ipw_read_indirect(a, b, c, d); \
412 })
413 
414 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
415 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
416 				int num);
417 #define ipw_write_indirect(a, b, c, d) do { \
418 	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
419 			__LINE__, (u32)(b), (u32)(d)); \
420 	_ipw_write_indirect(a, b, c, d); \
421 } while (0)
422 
423 /* 32-bit indirect write (above 4K) */
424 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
425 {
426 	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
427 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
428 	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
429 }
430 
431 /* 8-bit indirect write (above 4K) */
432 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
433 {
434 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
435 	u32 dif_len = reg - aligned_addr;
436 
437 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
438 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
439 	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
440 }
441 
442 /* 16-bit indirect write (above 4K) */
443 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
444 {
445 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
446 	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
447 
448 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
449 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
450 	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
451 }
452 
453 /* 8-bit indirect read (above 4K) */
454 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
455 {
456 	u32 word;
457 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
458 	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
459 	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
460 	return (word >> ((reg & 0x3) * 8)) & 0xff;
461 }
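/* Illustrative only: _ipw_read_reg8(priv, 0x00030003) latches the dword-aligned
 * address 0x00030000 into IPW_INDIRECT_ADDR, reads the whole 32-bit word back
 * through IPW_INDIRECT_DATA, and shifts it right by (3 * 8) bits to return just
 * the byte at offset 3 within that word.
 */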
462 
463 /* 32-bit indirect read (above 4K) */
464 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
465 {
466 	u32 value;
467 
468 	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
469 
470 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
471 	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
472 	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
473 	return value;
474 }
475 
476 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
477 /*    for area above 1st 4K of SRAM/reg space */
478 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
479 			       int num)
480 {
481 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
482 	u32 dif_len = addr - aligned_addr;
483 	u32 i;
484 
485 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
486 
487 	if (num <= 0) {
488 		return;
489 	}
490 
491 	/* Read the first dword (or portion) byte by byte */
492 	if (unlikely(dif_len)) {
493 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
494 		/* Start reading at aligned_addr + dif_len */
495 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
496 			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
497 		aligned_addr += 4;
498 	}
499 
500 	/* Read all of the middle dwords as dwords, with auto-increment */
501 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
502 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
503 		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
504 
505 	/* Read the last dword (or portion) byte by byte */
506 	if (unlikely(num)) {
507 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 		for (i = 0; num > 0; i++, num--)
509 			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
510 	}
511 }
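/* Illustrative only: a 10-byte _ipw_read_indirect() starting at 0x00030002
 * decomposes into
 *   - two byte reads via IPW_INDIRECT_DATA (offsets 2 and 3 of the first
 *     dword at 0x00030000),
 *   - two auto-incrementing dword reads via IPW_AUTOINC_DATA, and
 *   - no trailing byte reads (10 - 2 - 8 == 0),
 * so only the ragged edges of an unaligned transfer go byte by byte.
 */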
512 
513 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
514 /*    for area above 1st 4K of SRAM/reg space */
515 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
516 				int num)
517 {
518 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
519 	u32 dif_len = addr - aligned_addr;
520 	u32 i;
521 
522 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
523 
524 	if (num <= 0) {
525 		return;
526 	}
527 
528 	/* Write the first dword (or portion) byte by byte */
529 	if (unlikely(dif_len)) {
530 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
531 		/* Start writing at aligned_addr + dif_len */
532 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
533 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
534 		aligned_addr += 4;
535 	}
536 
537 	/* Write all of the middle dwords as dwords, with auto-increment */
538 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
539 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
540 		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
541 
542 	/* Write the last dword (or portion) byte by byte */
543 	if (unlikely(num)) {
544 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 		for (i = 0; num > 0; i++, num--, buf++)
546 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
547 	}
548 }
549 
550 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
551 /*    for 1st 4K of SRAM/regs space */
552 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
553 			     int num)
554 {
555 	memcpy_toio((priv->hw_base + addr), buf, num);
556 }
557 
558 /* Set bit(s) in low 4K of SRAM/regs */
559 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
560 {
561 	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
562 }
563 
564 /* Clear bit(s) in low 4K of SRAM/regs */
565 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
566 {
567 	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
568 }
569 
570 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
571 {
572 	if (priv->status & STATUS_INT_ENABLED)
573 		return;
574 	priv->status |= STATUS_INT_ENABLED;
575 	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
576 }
577 
578 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
579 {
580 	if (!(priv->status & STATUS_INT_ENABLED))
581 		return;
582 	priv->status &= ~STATUS_INT_ENABLED;
583 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
584 }
585 
586 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
587 {
588 	unsigned long flags;
589 
590 	spin_lock_irqsave(&priv->irq_lock, flags);
591 	__ipw_enable_interrupts(priv);
592 	spin_unlock_irqrestore(&priv->irq_lock, flags);
593 }
594 
595 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
596 {
597 	unsigned long flags;
598 
599 	spin_lock_irqsave(&priv->irq_lock, flags);
600 	__ipw_disable_interrupts(priv);
601 	spin_unlock_irqrestore(&priv->irq_lock, flags);
602 }
603 
604 static char *ipw_error_desc(u32 val)
605 {
606 	switch (val) {
607 	case IPW_FW_ERROR_OK:
608 		return "ERROR_OK";
609 	case IPW_FW_ERROR_FAIL:
610 		return "ERROR_FAIL";
611 	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
612 		return "MEMORY_UNDERFLOW";
613 	case IPW_FW_ERROR_MEMORY_OVERFLOW:
614 		return "MEMORY_OVERFLOW";
615 	case IPW_FW_ERROR_BAD_PARAM:
616 		return "BAD_PARAM";
617 	case IPW_FW_ERROR_BAD_CHECKSUM:
618 		return "BAD_CHECKSUM";
619 	case IPW_FW_ERROR_NMI_INTERRUPT:
620 		return "NMI_INTERRUPT";
621 	case IPW_FW_ERROR_BAD_DATABASE:
622 		return "BAD_DATABASE";
623 	case IPW_FW_ERROR_ALLOC_FAIL:
624 		return "ALLOC_FAIL";
625 	case IPW_FW_ERROR_DMA_UNDERRUN:
626 		return "DMA_UNDERRUN";
627 	case IPW_FW_ERROR_DMA_STATUS:
628 		return "DMA_STATUS";
629 	case IPW_FW_ERROR_DINO_ERROR:
630 		return "DINO_ERROR";
631 	case IPW_FW_ERROR_EEPROM_ERROR:
632 		return "EEPROM_ERROR";
633 	case IPW_FW_ERROR_SYSASSERT:
634 		return "SYSASSERT";
635 	case IPW_FW_ERROR_FATAL_ERROR:
636 		return "FATAL_ERROR";
637 	default:
638 		return "UNKNOWN_ERROR";
639 	}
640 }
641 
642 static void ipw_dump_error_log(struct ipw_priv *priv,
643 			       struct ipw_fw_error *error)
644 {
645 	u32 i;
646 
647 	if (!error) {
648 		IPW_ERROR("Error allocating and capturing error log.  "
649 			  "Nothing to dump.\n");
650 		return;
651 	}
652 
653 	IPW_ERROR("Start IPW Error Log Dump:\n");
654 	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
655 		  error->status, error->config);
656 
657 	for (i = 0; i < error->elem_len; i++)
658 		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
659 			  ipw_error_desc(error->elem[i].desc),
660 			  error->elem[i].time,
661 			  error->elem[i].blink1,
662 			  error->elem[i].blink2,
663 			  error->elem[i].link1,
664 			  error->elem[i].link2, error->elem[i].data);
665 	for (i = 0; i < error->log_len; i++)
666 		IPW_ERROR("%i\t0x%08x\t%i\n",
667 			  error->log[i].time,
668 			  error->log[i].data, error->log[i].event);
669 }
670 
671 static inline int ipw_is_init(struct ipw_priv *priv)
672 {
673 	return (priv->status & STATUS_INIT) ? 1 : 0;
674 }
675 
676 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
677 {
678 	u32 addr, field_info, field_len, field_count, total_len;
679 
680 	IPW_DEBUG_ORD("ordinal = %i\n", ord);
681 
682 	if (!priv || !val || !len) {
683 		IPW_DEBUG_ORD("Invalid argument\n");
684 		return -EINVAL;
685 	}
686 
687 	/* verify device ordinal tables have been initialized */
688 	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
689 		IPW_DEBUG_ORD("Access ordinals before initialization\n");
690 		return -EINVAL;
691 	}
692 
693 	switch (IPW_ORD_TABLE_ID_MASK & ord) {
694 	case IPW_ORD_TABLE_0_MASK:
695 		/*
696 		 * TABLE 0: Direct access to a table of 32 bit values
697 		 *
698 		 * This is a very simple table with the data directly
699 		 * read from the table
700 		 */
701 
702 		/* remove the table id from the ordinal */
703 		ord &= IPW_ORD_TABLE_VALUE_MASK;
704 
705 		/* boundary check */
706 		if (ord > priv->table0_len) {
707 			IPW_DEBUG_ORD("ordinal value (%i) longer than "
708 				      "max (%i)\n", ord, priv->table0_len);
709 			return -EINVAL;
710 		}
711 
712 		/* verify we have enough room to store the value */
713 		if (*len < sizeof(u32)) {
714 			IPW_DEBUG_ORD("ordinal buffer length too small, "
715 				      "need %zd\n", sizeof(u32));
716 			return -EINVAL;
717 		}
718 
719 		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
720 			      ord, priv->table0_addr + (ord << 2));
721 
722 		*len = sizeof(u32);
723 		ord <<= 2;
724 		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
725 		break;
726 
727 	case IPW_ORD_TABLE_1_MASK:
728 		/*
729 		 * TABLE 1: Indirect access to a table of 32 bit values
730 		 *
731 		 * This is a fairly large table of u32 values each
732 		 * representing starting addr for the data (which is
733 		 * also a u32)
734 		 */
735 
736 		/* remove the table id from the ordinal */
737 		ord &= IPW_ORD_TABLE_VALUE_MASK;
738 
739 		/* boundary check */
740 		if (ord > priv->table1_len) {
741 			IPW_DEBUG_ORD("ordinal value too long\n");
742 			return -EINVAL;
743 		}
744 
745 		/* verify we have enough room to store the value */
746 		if (*len < sizeof(u32)) {
747 			IPW_DEBUG_ORD("ordinal buffer length too small, "
748 				      "need %zd\n", sizeof(u32));
749 			return -EINVAL;
750 		}
751 
752 		*((u32 *) val) =
753 		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
754 		*len = sizeof(u32);
755 		break;
756 
757 	case IPW_ORD_TABLE_2_MASK:
758 		/*
759 		 * TABLE 2: Indirect access to a table of variable sized values
760 		 *
761 		 * This table consists of six values, each containing
762 		 *     - a dword containing the starting offset of the data
763 		 *     - a dword containing the length in the first 16 bits
764 		 *       and the count in the second 16 bits
765 		 */
766 
767 		/* remove the table id from the ordinal */
768 		ord &= IPW_ORD_TABLE_VALUE_MASK;
769 
770 		/* boundary check */
771 		if (ord > priv->table2_len) {
772 			IPW_DEBUG_ORD("ordinal value too long\n");
773 			return -EINVAL;
774 		}
775 
776 		/* get the address of statistic */
777 		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
778 
779 		/* get the second DW of statistics ;
780 		 * two 16-bit words - first is length, second is count */
781 		field_info =
782 		    ipw_read_reg32(priv,
783 				   priv->table2_addr + (ord << 3) +
784 				   sizeof(u32));
785 
786 		/* get each entry length */
787 		field_len = *((u16 *) & field_info);
788 
789 		/* get number of entries */
790 		field_count = *(((u16 *) & field_info) + 1);
791 
792 		/* abort if not enough memory */
793 		total_len = field_len * field_count;
794 		if (total_len > *len) {
795 			*len = total_len;
796 			return -EINVAL;
797 		}
798 
799 		*len = total_len;
800 		if (!total_len)
801 			return 0;
802 
803 		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
804 			      "field_info = 0x%08x\n",
805 			      addr, total_len, field_info);
806 		ipw_read_indirect(priv, addr, val, total_len);
807 		break;
808 
809 	default:
810 		IPW_DEBUG_ORD("Invalid ordinal!\n");
811 		return -EINVAL;
812 
813 	}
814 
815 	return 0;
816 }
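/* Typical call pattern, sketched from ucode_version_show() further below:
 *
 *	u32 len = sizeof(u32), tmp = 0;
 *
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		return 0;
 *
 * On entry *len is the size of the caller's buffer; on success it is updated
 * to the number of bytes actually written (or to the required size when the
 * buffer is too small for a TABLE 2 ordinal, in which case -EINVAL is
 * returned).
 */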
817 
818 static void ipw_init_ordinals(struct ipw_priv *priv)
819 {
820 	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
821 	priv->table0_len = ipw_read32(priv, priv->table0_addr);
822 
823 	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
824 		      priv->table0_addr, priv->table0_len);
825 
826 	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
827 	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
828 
829 	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
830 		      priv->table1_addr, priv->table1_len);
831 
832 	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
833 	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
834 	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
835 
836 	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
837 		      priv->table2_addr, priv->table2_len);
838 
839 }
840 
841 static u32 ipw_register_toggle(u32 reg)
842 {
843 	reg &= ~IPW_START_STANDBY;
844 	if (reg & IPW_GATE_ODMA)
845 		reg &= ~IPW_GATE_ODMA;
846 	if (reg & IPW_GATE_IDMA)
847 		reg &= ~IPW_GATE_IDMA;
848 	if (reg & IPW_GATE_ADMA)
849 		reg &= ~IPW_GATE_ADMA;
850 	return reg;
851 }
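/* Note: ipw_register_toggle() clears IPW_START_STANDBY and the ODMA/IDMA/ADMA
 * gating bits from a value read back from IPW_EVENT_REG, presumably so the
 * LED helpers below can rewrite the register without re-asserting those
 * control bits.
 */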
852 
853 /*
854  * LED behavior:
855  * - On radio ON, turn on any LEDs that need to be on during start
856  * - On initialization, start unassociated blink
857  * - On association, disable unassociated blink
858  * - On disassociation, start unassociated blink
859  * - On radio OFF, turn off any LEDs started during radio on
860  *
861  */
862 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
863 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
864 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
865 
866 static void ipw_led_link_on(struct ipw_priv *priv)
867 {
868 	unsigned long flags;
869 	u32 led;
870 
871 	/* If configured not to use LEDs, or nic_type is 1,
872 	 * then we don't toggle the LINK LED. */
873 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
874 		return;
875 
876 	spin_lock_irqsave(&priv->lock, flags);
877 
878 	if (!(priv->status & STATUS_RF_KILL_MASK) &&
879 	    !(priv->status & STATUS_LED_LINK_ON)) {
880 		IPW_DEBUG_LED("Link LED On\n");
881 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
882 		led |= priv->led_association_on;
883 
884 		led = ipw_register_toggle(led);
885 
886 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
887 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
888 
889 		priv->status |= STATUS_LED_LINK_ON;
890 
891 		/* If we aren't associated, schedule turning the LED off */
892 		if (!(priv->status & STATUS_ASSOCIATED))
893 			schedule_delayed_work(&priv->led_link_off,
894 					      LD_TIME_LINK_ON);
895 	}
896 
897 	spin_unlock_irqrestore(&priv->lock, flags);
898 }
899 
900 static void ipw_bg_led_link_on(struct work_struct *work)
901 {
902 	struct ipw_priv *priv =
903 		container_of(work, struct ipw_priv, led_link_on.work);
904 	mutex_lock(&priv->mutex);
905 	ipw_led_link_on(priv);
906 	mutex_unlock(&priv->mutex);
907 }
908 
909 static void ipw_led_link_off(struct ipw_priv *priv)
910 {
911 	unsigned long flags;
912 	u32 led;
913 
914 	/* If configured not to use LEDs, or nic type is 1,
915 	 * then we don't toggle the LINK LED. */
916 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
917 		return;
918 
919 	spin_lock_irqsave(&priv->lock, flags);
920 
921 	if (priv->status & STATUS_LED_LINK_ON) {
922 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
923 		led &= priv->led_association_off;
924 		led = ipw_register_toggle(led);
925 
926 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
927 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
928 
929 		IPW_DEBUG_LED("Link LED Off\n");
930 
931 		priv->status &= ~STATUS_LED_LINK_ON;
932 
933 		/* If we aren't associated and the radio is on, schedule
934 		 * turning the LED on (blink while unassociated) */
935 		if (!(priv->status & STATUS_RF_KILL_MASK) &&
936 		    !(priv->status & STATUS_ASSOCIATED))
937 			schedule_delayed_work(&priv->led_link_on,
938 					      LD_TIME_LINK_OFF);
939 
940 	}
941 
942 	spin_unlock_irqrestore(&priv->lock, flags);
943 }
944 
945 static void ipw_bg_led_link_off(struct work_struct *work)
946 {
947 	struct ipw_priv *priv =
948 		container_of(work, struct ipw_priv, led_link_off.work);
949 	mutex_lock(&priv->mutex);
950 	ipw_led_link_off(priv);
951 	mutex_unlock(&priv->mutex);
952 }
953 
954 static void __ipw_led_activity_on(struct ipw_priv *priv)
955 {
956 	u32 led;
957 
958 	if (priv->config & CFG_NO_LED)
959 		return;
960 
961 	if (priv->status & STATUS_RF_KILL_MASK)
962 		return;
963 
964 	if (!(priv->status & STATUS_LED_ACT_ON)) {
965 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
966 		led |= priv->led_activity_on;
967 
968 		led = ipw_register_toggle(led);
969 
970 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
971 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
972 
973 		IPW_DEBUG_LED("Activity LED On\n");
974 
975 		priv->status |= STATUS_LED_ACT_ON;
976 
977 		cancel_delayed_work(&priv->led_act_off);
978 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
979 	} else {
980 		/* Reschedule LED off for full time period */
981 		cancel_delayed_work(&priv->led_act_off);
982 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
983 	}
984 }
985 
986 #if 0
987 void ipw_led_activity_on(struct ipw_priv *priv)
988 {
989 	unsigned long flags;
990 	spin_lock_irqsave(&priv->lock, flags);
991 	__ipw_led_activity_on(priv);
992 	spin_unlock_irqrestore(&priv->lock, flags);
993 }
994 #endif  /*  0  */
995 
996 static void ipw_led_activity_off(struct ipw_priv *priv)
997 {
998 	unsigned long flags;
999 	u32 led;
1000 
1001 	if (priv->config & CFG_NO_LED)
1002 		return;
1003 
1004 	spin_lock_irqsave(&priv->lock, flags);
1005 
1006 	if (priv->status & STATUS_LED_ACT_ON) {
1007 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
1008 		led &= priv->led_activity_off;
1009 
1010 		led = ipw_register_toggle(led);
1011 
1012 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1013 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1014 
1015 		IPW_DEBUG_LED("Activity LED Off\n");
1016 
1017 		priv->status &= ~STATUS_LED_ACT_ON;
1018 	}
1019 
1020 	spin_unlock_irqrestore(&priv->lock, flags);
1021 }
1022 
1023 static void ipw_bg_led_activity_off(struct work_struct *work)
1024 {
1025 	struct ipw_priv *priv =
1026 		container_of(work, struct ipw_priv, led_act_off.work);
1027 	mutex_lock(&priv->mutex);
1028 	ipw_led_activity_off(priv);
1029 	mutex_unlock(&priv->mutex);
1030 }
1031 
1032 static void ipw_led_band_on(struct ipw_priv *priv)
1033 {
1034 	unsigned long flags;
1035 	u32 led;
1036 
1037 	/* Only nic type 1 supports mode LEDs */
1038 	if (priv->config & CFG_NO_LED ||
1039 	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1040 		return;
1041 
1042 	spin_lock_irqsave(&priv->lock, flags);
1043 
1044 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1045 	if (priv->assoc_network->mode == IEEE_A) {
1046 		led |= priv->led_ofdm_on;
1047 		led &= priv->led_association_off;
1048 		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1049 	} else if (priv->assoc_network->mode == IEEE_G) {
1050 		led |= priv->led_ofdm_on;
1051 		led |= priv->led_association_on;
1052 		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1053 	} else {
1054 		led &= priv->led_ofdm_off;
1055 		led |= priv->led_association_on;
1056 		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1057 	}
1058 
1059 	led = ipw_register_toggle(led);
1060 
1061 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1062 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1063 
1064 	spin_unlock_irqrestore(&priv->lock, flags);
1065 }
1066 
1067 static void ipw_led_band_off(struct ipw_priv *priv)
1068 {
1069 	unsigned long flags;
1070 	u32 led;
1071 
1072 	/* Only nic type 1 supports mode LEDs */
1073 	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1074 		return;
1075 
1076 	spin_lock_irqsave(&priv->lock, flags);
1077 
1078 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1079 	led &= priv->led_ofdm_off;
1080 	led &= priv->led_association_off;
1081 
1082 	led = ipw_register_toggle(led);
1083 
1084 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1085 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1086 
1087 	spin_unlock_irqrestore(&priv->lock, flags);
1088 }
1089 
1090 static void ipw_led_radio_on(struct ipw_priv *priv)
1091 {
1092 	ipw_led_link_on(priv);
1093 }
1094 
1095 static void ipw_led_radio_off(struct ipw_priv *priv)
1096 {
1097 	ipw_led_activity_off(priv);
1098 	ipw_led_link_off(priv);
1099 }
1100 
1101 static void ipw_led_link_up(struct ipw_priv *priv)
1102 {
1103 	/* Set the Link Led on for all nic types */
1104 	ipw_led_link_on(priv);
1105 }
1106 
1107 static void ipw_led_link_down(struct ipw_priv *priv)
1108 {
1109 	ipw_led_activity_off(priv);
1110 	ipw_led_link_off(priv);
1111 
1112 	if (priv->status & STATUS_RF_KILL_MASK)
1113 		ipw_led_radio_off(priv);
1114 }
1115 
1116 static void ipw_led_init(struct ipw_priv *priv)
1117 {
1118 	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1119 
1120 	/* Set the default PINs for the link and activity leds */
1121 	priv->led_activity_on = IPW_ACTIVITY_LED;
1122 	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1123 
1124 	priv->led_association_on = IPW_ASSOCIATED_LED;
1125 	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1126 
1127 	/* Set the default PINs for the OFDM leds */
1128 	priv->led_ofdm_on = IPW_OFDM_LED;
1129 	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1130 
1131 	switch (priv->nic_type) {
1132 	case EEPROM_NIC_TYPE_1:
1133 		/* In this NIC type, the LEDs are reversed.... */
1134 		priv->led_activity_on = IPW_ASSOCIATED_LED;
1135 		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1136 		priv->led_association_on = IPW_ACTIVITY_LED;
1137 		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1138 
1139 		if (!(priv->config & CFG_NO_LED))
1140 			ipw_led_band_on(priv);
1141 
1142 		/* And we don't blink link LEDs for this nic, so
1143 		 * just return here */
1144 		return;
1145 
1146 	case EEPROM_NIC_TYPE_3:
1147 	case EEPROM_NIC_TYPE_2:
1148 	case EEPROM_NIC_TYPE_4:
1149 	case EEPROM_NIC_TYPE_0:
1150 		break;
1151 
1152 	default:
1153 		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1154 			       priv->nic_type);
1155 		priv->nic_type = EEPROM_NIC_TYPE_0;
1156 		break;
1157 	}
1158 
1159 	if (!(priv->config & CFG_NO_LED)) {
1160 		if (priv->status & STATUS_ASSOCIATED)
1161 			ipw_led_link_on(priv);
1162 		else
1163 			ipw_led_link_off(priv);
1164 	}
1165 }
1166 
1167 static void ipw_led_shutdown(struct ipw_priv *priv)
1168 {
1169 	ipw_led_activity_off(priv);
1170 	ipw_led_link_off(priv);
1171 	ipw_led_band_off(priv);
1172 	cancel_delayed_work(&priv->led_link_on);
1173 	cancel_delayed_work(&priv->led_link_off);
1174 	cancel_delayed_work(&priv->led_act_off);
1175 }
1176 
1177 /*
1178  * The following adds a new attribute to the sysfs representation
1179  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1180  * used for controlling the debug level.
1181  *
1182  * See the level definitions in ipw for details.
1183  */
1184 static ssize_t debug_level_show(struct device_driver *d, char *buf)
1185 {
1186 	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1187 }
1188 
1189 static ssize_t debug_level_store(struct device_driver *d, const char *buf,
1190 				 size_t count)
1191 {
1192 	char *p = (char *)buf;
1193 	u32 val;
1194 
1195 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1196 		p++;
1197 		if (p[0] == 'x' || p[0] == 'X')
1198 			p++;
1199 		val = simple_strtoul(p, &p, 16);
1200 	} else
1201 		val = simple_strtoul(p, &p, 10);
1202 	if (p == buf)
1203 		printk(KERN_INFO DRV_NAME
1204 		       ": %s is not in hex or decimal form.\n", buf);
1205 	else
1206 		ipw_debug_level = val;
1207 
1208 	return strnlen(buf, count);
1209 }
1210 static DRIVER_ATTR_RW(debug_level);
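/* Usage sketch, assuming the attribute lands under the registered PCI driver
 * name (see the comment above debug_level_show()):
 *
 *	# echo 0x00000100 > /sys/bus/pci/drivers/<driver-name>/debug_level
 *	# cat /sys/bus/pci/drivers/<driver-name>/debug_level
 *
 * Values prefixed with 'x' or '0x' are parsed as hex, everything else as
 * decimal; see the level definitions referenced above for the bit meanings.
 */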
1211 
1212 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1213 {
1214 	/* length = 1st dword in log */
1215 	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1216 }
1217 
1218 static void ipw_capture_event_log(struct ipw_priv *priv,
1219 				  u32 log_len, struct ipw_event *log)
1220 {
1221 	u32 base;
1222 
1223 	if (log_len) {
1224 		base = ipw_read32(priv, IPW_EVENT_LOG);
1225 		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1226 				  (u8 *) log, sizeof(*log) * log_len);
1227 	}
1228 }
1229 
1230 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1231 {
1232 	struct ipw_fw_error *error;
1233 	u32 log_len = ipw_get_event_log_len(priv);
1234 	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1235 	u32 elem_len = ipw_read_reg32(priv, base);
1236 
1237 	error = kmalloc(sizeof(*error) +
1238 			sizeof(*error->elem) * elem_len +
1239 			sizeof(*error->log) * log_len, GFP_ATOMIC);
1240 	if (!error) {
1241 		IPW_ERROR("Memory allocation for firmware error log "
1242 			  "failed.\n");
1243 		return NULL;
1244 	}
1245 	error->jiffies = jiffies;
1246 	error->status = priv->status;
1247 	error->config = priv->config;
1248 	error->elem_len = elem_len;
1249 	error->log_len = log_len;
1250 	error->elem = (struct ipw_error_elem *)error->payload;
1251 	error->log = (struct ipw_event *)(error->elem + elem_len);
1252 
1253 	ipw_capture_event_log(priv, log_len, error->log);
1254 
1255 	if (elem_len)
1256 		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1257 				  sizeof(*error->elem) * elem_len);
1258 
1259 	return error;
1260 }
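/* Layout note: the capture is a single allocation, laid out as the
 * ipw_fw_error header, then elem_len ipw_error_elem entries, then log_len
 * ipw_event entries.  error->elem and error->log are just pointers carved out
 * of that payload, which is why a single kfree() (see error_store() below)
 * releases the whole capture.
 */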
1261 
1262 static ssize_t event_log_show(struct device *d,
1263 			      struct device_attribute *attr, char *buf)
1264 {
1265 	struct ipw_priv *priv = dev_get_drvdata(d);
1266 	u32 log_len = ipw_get_event_log_len(priv);
1267 	u32 log_size;
1268 	struct ipw_event *log;
1269 	u32 len = 0, i;
1270 
1271 	/* not using min() because of its strict type checking */
1272 	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1273 			sizeof(*log) * log_len : PAGE_SIZE;
1274 	log = kzalloc(log_size, GFP_KERNEL);
1275 	if (!log) {
1276 		IPW_ERROR("Unable to allocate memory for log\n");
1277 		return 0;
1278 	}
1279 	log_len = log_size / sizeof(*log);
1280 	ipw_capture_event_log(priv, log_len, log);
1281 
1282 	len += scnprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1283 	for (i = 0; i < log_len; i++)
1284 		len += scnprintf(buf + len, PAGE_SIZE - len,
1285 				"\n%08X%08X%08X",
1286 				log[i].time, log[i].event, log[i].data);
1287 	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
1288 	kfree(log);
1289 	return len;
1290 }
1291 
1292 static DEVICE_ATTR_RO(event_log);
1293 
1294 static ssize_t error_show(struct device *d,
1295 			  struct device_attribute *attr, char *buf)
1296 {
1297 	struct ipw_priv *priv = dev_get_drvdata(d);
1298 	u32 len = 0, i;
1299 	if (!priv->error)
1300 		return 0;
1301 	len += scnprintf(buf + len, PAGE_SIZE - len,
1302 			"%08lX%08X%08X%08X",
1303 			priv->error->jiffies,
1304 			priv->error->status,
1305 			priv->error->config, priv->error->elem_len);
1306 	for (i = 0; i < priv->error->elem_len; i++)
1307 		len += scnprintf(buf + len, PAGE_SIZE - len,
1308 				"\n%08X%08X%08X%08X%08X%08X%08X",
1309 				priv->error->elem[i].time,
1310 				priv->error->elem[i].desc,
1311 				priv->error->elem[i].blink1,
1312 				priv->error->elem[i].blink2,
1313 				priv->error->elem[i].link1,
1314 				priv->error->elem[i].link2,
1315 				priv->error->elem[i].data);
1316 
1317 	len += scnprintf(buf + len, PAGE_SIZE - len,
1318 			"\n%08X", priv->error->log_len);
1319 	for (i = 0; i < priv->error->log_len; i++)
1320 		len += scnprintf(buf + len, PAGE_SIZE - len,
1321 				"\n%08X%08X%08X",
1322 				priv->error->log[i].time,
1323 				priv->error->log[i].event,
1324 				priv->error->log[i].data);
1325 	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
1326 	return len;
1327 }
1328 
1329 static ssize_t error_store(struct device *d,
1330 			   struct device_attribute *attr,
1331 			   const char *buf, size_t count)
1332 {
1333 	struct ipw_priv *priv = dev_get_drvdata(d);
1334 
1335 	kfree(priv->error);
1336 	priv->error = NULL;
1337 	return count;
1338 }
1339 
1340 static DEVICE_ATTR_RW(error);
1341 
1342 static ssize_t cmd_log_show(struct device *d,
1343 			    struct device_attribute *attr, char *buf)
1344 {
1345 	struct ipw_priv *priv = dev_get_drvdata(d);
1346 	u32 len = 0, i;
1347 	if (!priv->cmdlog)
1348 		return 0;
1349 	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1350 	     (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
1351 	     i = (i + 1) % priv->cmdlog_len) {
1352 		len +=
1353 		    scnprintf(buf + len, PAGE_SIZE - len,
1354 			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1355 			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1356 			     priv->cmdlog[i].cmd.len);
1357 		len +=
1358 		    snprintk_buf(buf + len, PAGE_SIZE - len,
1359 				 (u8 *) priv->cmdlog[i].cmd.param,
1360 				 priv->cmdlog[i].cmd.len);
1361 		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
1362 	}
1363 	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
1364 	return len;
1365 }
1366 
1367 static DEVICE_ATTR_RO(cmd_log);
1368 
1369 #ifdef CONFIG_IPW2200_PROMISCUOUS
1370 static void ipw_prom_free(struct ipw_priv *priv);
1371 static int ipw_prom_alloc(struct ipw_priv *priv);
1372 static ssize_t rtap_iface_store(struct device *d,
1373 			 struct device_attribute *attr,
1374 			 const char *buf, size_t count)
1375 {
1376 	struct ipw_priv *priv = dev_get_drvdata(d);
1377 	int rc = 0;
1378 
1379 	if (count < 1)
1380 		return -EINVAL;
1381 
1382 	switch (buf[0]) {
1383 	case '0':
1384 		if (!rtap_iface)
1385 			return count;
1386 
1387 		if (netif_running(priv->prom_net_dev)) {
1388 			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1389 			return count;
1390 		}
1391 
1392 		ipw_prom_free(priv);
1393 		rtap_iface = 0;
1394 		break;
1395 
1396 	case '1':
1397 		if (rtap_iface)
1398 			return count;
1399 
1400 		rc = ipw_prom_alloc(priv);
1401 		if (!rc)
1402 			rtap_iface = 1;
1403 		break;
1404 
1405 	default:
1406 		return -EINVAL;
1407 	}
1408 
1409 	if (rc) {
1410 		IPW_ERROR("Failed to register promiscuous network "
1411 			  "device (error %d).\n", rc);
1412 	}
1413 
1414 	return count;
1415 }
1416 
1417 static ssize_t rtap_iface_show(struct device *d,
1418 			struct device_attribute *attr,
1419 			char *buf)
1420 {
1421 	struct ipw_priv *priv = dev_get_drvdata(d);
1422 	if (rtap_iface)
1423 		return sprintf(buf, "%s", priv->prom_net_dev->name);
1424 	else {
1425 		buf[0] = '-';
1426 		buf[1] = '1';
1427 		buf[2] = '\0';
1428 		return 3;
1429 	}
1430 }
1431 
1432 static DEVICE_ATTR_ADMIN_RW(rtap_iface);
1433 
1434 static ssize_t rtap_filter_store(struct device *d,
1435 			 struct device_attribute *attr,
1436 			 const char *buf, size_t count)
1437 {
1438 	struct ipw_priv *priv = dev_get_drvdata(d);
1439 
1440 	if (!priv->prom_priv) {
1441 		IPW_ERROR("Attempting to set filter without "
1442 			  "rtap_iface enabled.\n");
1443 		return -EPERM;
1444 	}
1445 
1446 	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1447 
1448 	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1449 		       BIT_ARG16(priv->prom_priv->filter));
1450 
1451 	return count;
1452 }
1453 
1454 static ssize_t rtap_filter_show(struct device *d,
1455 			struct device_attribute *attr,
1456 			char *buf)
1457 {
1458 	struct ipw_priv *priv = dev_get_drvdata(d);
1459 	return sprintf(buf, "0x%04X",
1460 		       priv->prom_priv ? priv->prom_priv->filter : 0);
1461 }
1462 
1463 static DEVICE_ATTR_ADMIN_RW(rtap_filter);
1464 #endif
1465 
1466 static ssize_t scan_age_show(struct device *d, struct device_attribute *attr,
1467 			     char *buf)
1468 {
1469 	struct ipw_priv *priv = dev_get_drvdata(d);
1470 	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1471 }
1472 
1473 static ssize_t scan_age_store(struct device *d, struct device_attribute *attr,
1474 			      const char *buf, size_t count)
1475 {
1476 	struct ipw_priv *priv = dev_get_drvdata(d);
1477 	struct net_device *dev = priv->net_dev;
1478 	char buffer[] = "00000000";
1479 	unsigned long len =
1480 	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1481 	unsigned long val;
1482 	char *p = buffer;
1483 
1484 	IPW_DEBUG_INFO("enter\n");
1485 
1486 	strncpy(buffer, buf, len);
1487 	buffer[len] = 0;
1488 
1489 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1490 		p++;
1491 		if (p[0] == 'x' || p[0] == 'X')
1492 			p++;
1493 		val = simple_strtoul(p, &p, 16);
1494 	} else
1495 		val = simple_strtoul(p, &p, 10);
1496 	if (p == buffer) {
1497 		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1498 	} else {
1499 		priv->ieee->scan_age = val;
1500 		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1501 	}
1502 
1503 	IPW_DEBUG_INFO("exit\n");
1504 	return len;
1505 }
1506 
1507 static DEVICE_ATTR_RW(scan_age);
1508 
1509 static ssize_t led_show(struct device *d, struct device_attribute *attr,
1510 			char *buf)
1511 {
1512 	struct ipw_priv *priv = dev_get_drvdata(d);
1513 	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1514 }
1515 
1516 static ssize_t led_store(struct device *d, struct device_attribute *attr,
1517 			 const char *buf, size_t count)
1518 {
1519 	struct ipw_priv *priv = dev_get_drvdata(d);
1520 
1521 	IPW_DEBUG_INFO("enter\n");
1522 
1523 	if (count == 0)
1524 		return 0;
1525 
1526 	if (*buf == 0) {
1527 		IPW_DEBUG_LED("Disabling LED control.\n");
1528 		priv->config |= CFG_NO_LED;
1529 		ipw_led_shutdown(priv);
1530 	} else {
1531 		IPW_DEBUG_LED("Enabling LED control.\n");
1532 		priv->config &= ~CFG_NO_LED;
1533 		ipw_led_init(priv);
1534 	}
1535 
1536 	IPW_DEBUG_INFO("exit\n");
1537 	return count;
1538 }
1539 
1540 static DEVICE_ATTR_RW(led);
1541 
1542 static ssize_t status_show(struct device *d,
1543 			   struct device_attribute *attr, char *buf)
1544 {
1545 	struct ipw_priv *p = dev_get_drvdata(d);
1546 	return sprintf(buf, "0x%08x\n", (int)p->status);
1547 }
1548 
1549 static DEVICE_ATTR_RO(status);
1550 
1551 static ssize_t cfg_show(struct device *d, struct device_attribute *attr,
1552 			char *buf)
1553 {
1554 	struct ipw_priv *p = dev_get_drvdata(d);
1555 	return sprintf(buf, "0x%08x\n", (int)p->config);
1556 }
1557 
1558 static DEVICE_ATTR_RO(cfg);
1559 
1560 static ssize_t nic_type_show(struct device *d,
1561 			     struct device_attribute *attr, char *buf)
1562 {
1563 	struct ipw_priv *priv = dev_get_drvdata(d);
1564 	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1565 }
1566 
1567 static DEVICE_ATTR_RO(nic_type);
1568 
1569 static ssize_t ucode_version_show(struct device *d,
1570 				  struct device_attribute *attr, char *buf)
1571 {
1572 	u32 len = sizeof(u32), tmp = 0;
1573 	struct ipw_priv *p = dev_get_drvdata(d);
1574 
1575 	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1576 		return 0;
1577 
1578 	return sprintf(buf, "0x%08x\n", tmp);
1579 }
1580 
1581 static DEVICE_ATTR_RO(ucode_version);
1582 
1583 static ssize_t rtc_show(struct device *d, struct device_attribute *attr,
1584 			char *buf)
1585 {
1586 	u32 len = sizeof(u32), tmp = 0;
1587 	struct ipw_priv *p = dev_get_drvdata(d);
1588 
1589 	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1590 		return 0;
1591 
1592 	return sprintf(buf, "0x%08x\n", tmp);
1593 }
1594 
1595 static DEVICE_ATTR_RO(rtc);
1596 
1597 /*
1598  * Add a device attribute to view/control the delay between eeprom
1599  * operations.
1600  */
1601 static ssize_t eeprom_delay_show(struct device *d,
1602 				 struct device_attribute *attr, char *buf)
1603 {
1604 	struct ipw_priv *p = dev_get_drvdata(d);
1605 	int n = p->eeprom_delay;
1606 	return sprintf(buf, "%i\n", n);
1607 }
1608 static ssize_t eeprom_delay_store(struct device *d,
1609 				  struct device_attribute *attr,
1610 				  const char *buf, size_t count)
1611 {
1612 	struct ipw_priv *p = dev_get_drvdata(d);
1613 	sscanf(buf, "%i", &p->eeprom_delay);
1614 	return strnlen(buf, count);
1615 }
1616 
1617 static DEVICE_ATTR_RW(eeprom_delay);
1618 
1619 static ssize_t command_event_reg_show(struct device *d,
1620 				      struct device_attribute *attr, char *buf)
1621 {
1622 	u32 reg = 0;
1623 	struct ipw_priv *p = dev_get_drvdata(d);
1624 
1625 	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1626 	return sprintf(buf, "0x%08x\n", reg);
1627 }
1628 static ssize_t command_event_reg_store(struct device *d,
1629 				       struct device_attribute *attr,
1630 				       const char *buf, size_t count)
1631 {
1632 	u32 reg;
1633 	struct ipw_priv *p = dev_get_drvdata(d);
1634 
1635 	sscanf(buf, "%x", &reg);
1636 	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1637 	return strnlen(buf, count);
1638 }
1639 
1640 static DEVICE_ATTR_RW(command_event_reg);
1641 
1642 static ssize_t mem_gpio_reg_show(struct device *d,
1643 				 struct device_attribute *attr, char *buf)
1644 {
1645 	u32 reg = 0;
1646 	struct ipw_priv *p = dev_get_drvdata(d);
1647 
1648 	reg = ipw_read_reg32(p, 0x301100);
1649 	return sprintf(buf, "0x%08x\n", reg);
1650 }
1651 static ssize_t mem_gpio_reg_store(struct device *d,
1652 				  struct device_attribute *attr,
1653 				  const char *buf, size_t count)
1654 {
1655 	u32 reg;
1656 	struct ipw_priv *p = dev_get_drvdata(d);
1657 
1658 	sscanf(buf, "%x", &reg);
1659 	ipw_write_reg32(p, 0x301100, reg);
1660 	return strnlen(buf, count);
1661 }
1662 
1663 static DEVICE_ATTR_RW(mem_gpio_reg);
1664 
1665 static ssize_t indirect_dword_show(struct device *d,
1666 				   struct device_attribute *attr, char *buf)
1667 {
1668 	u32 reg = 0;
1669 	struct ipw_priv *priv = dev_get_drvdata(d);
1670 
1671 	if (priv->status & STATUS_INDIRECT_DWORD)
1672 		reg = ipw_read_reg32(priv, priv->indirect_dword);
1673 	else
1674 		reg = 0;
1675 
1676 	return sprintf(buf, "0x%08x\n", reg);
1677 }
1678 static ssize_t indirect_dword_store(struct device *d,
1679 				    struct device_attribute *attr,
1680 				    const char *buf, size_t count)
1681 {
1682 	struct ipw_priv *priv = dev_get_drvdata(d);
1683 
1684 	sscanf(buf, "%x", &priv->indirect_dword);
1685 	priv->status |= STATUS_INDIRECT_DWORD;
1686 	return strnlen(buf, count);
1687 }
1688 
1689 static DEVICE_ATTR_RW(indirect_dword);
1690 
1691 static ssize_t indirect_byte_show(struct device *d,
1692 				  struct device_attribute *attr, char *buf)
1693 {
1694 	u8 reg = 0;
1695 	struct ipw_priv *priv = dev_get_drvdata(d);
1696 
1697 	if (priv->status & STATUS_INDIRECT_BYTE)
1698 		reg = ipw_read_reg8(priv, priv->indirect_byte);
1699 	else
1700 		reg = 0;
1701 
1702 	return sprintf(buf, "0x%02x\n", reg);
1703 }
1704 static ssize_t indirect_byte_store(struct device *d,
1705 				   struct device_attribute *attr,
1706 				   const char *buf, size_t count)
1707 {
1708 	struct ipw_priv *priv = dev_get_drvdata(d);
1709 
1710 	sscanf(buf, "%x", &priv->indirect_byte);
1711 	priv->status |= STATUS_INDIRECT_BYTE;
1712 	return strnlen(buf, count);
1713 }
1714 
1715 static DEVICE_ATTR_RW(indirect_byte);
1716 
1717 static ssize_t direct_dword_show(struct device *d,
1718 				 struct device_attribute *attr, char *buf)
1719 {
1720 	u32 reg = 0;
1721 	struct ipw_priv *priv = dev_get_drvdata(d);
1722 
1723 	if (priv->status & STATUS_DIRECT_DWORD)
1724 		reg = ipw_read32(priv, priv->direct_dword);
1725 	else
1726 		reg = 0;
1727 
1728 	return sprintf(buf, "0x%08x\n", reg);
1729 }
1730 static ssize_t direct_dword_store(struct device *d,
1731 				  struct device_attribute *attr,
1732 				  const char *buf, size_t count)
1733 {
1734 	struct ipw_priv *priv = dev_get_drvdata(d);
1735 
1736 	sscanf(buf, "%x", &priv->direct_dword);
1737 	priv->status |= STATUS_DIRECT_DWORD;
1738 	return strnlen(buf, count);
1739 }
1740 
1741 static DEVICE_ATTR_RW(direct_dword);
1742 
1743 static int rf_kill_active(struct ipw_priv *priv)
1744 {
1745 	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1746 		priv->status |= STATUS_RF_KILL_HW;
1747 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1748 	} else {
1749 		priv->status &= ~STATUS_RF_KILL_HW;
1750 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1751 	}
1752 
1753 	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1754 }
1755 
1756 static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr,
1757 			    char *buf)
1758 {
1759 	/* 0 - RF kill not enabled
1760 	   1 - SW based RF kill active (sysfs)
1761 	   2 - HW based RF kill active
1762 	   3 - Both HW and SW based RF kill active */
1763 	struct ipw_priv *priv = dev_get_drvdata(d);
1764 	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1765 	    (rf_kill_active(priv) ? 0x2 : 0x0);
1766 	return sprintf(buf, "%i\n", val);
1767 }
1768 
1769 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1770 {
1771 	if ((disable_radio ? 1 : 0) ==
1772 	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1773 		return 0;
1774 
1775 	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1776 			  disable_radio ? "OFF" : "ON");
1777 
1778 	if (disable_radio) {
1779 		priv->status |= STATUS_RF_KILL_SW;
1780 
1781 		cancel_delayed_work(&priv->request_scan);
1782 		cancel_delayed_work(&priv->request_direct_scan);
1783 		cancel_delayed_work(&priv->request_passive_scan);
1784 		cancel_delayed_work(&priv->scan_event);
1785 		schedule_work(&priv->down);
1786 	} else {
1787 		priv->status &= ~STATUS_RF_KILL_SW;
1788 		if (rf_kill_active(priv)) {
1789 			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1790 					  "disabled by HW switch\n");
1791 			/* Make sure the RF_KILL check timer is running */
1792 			cancel_delayed_work(&priv->rf_kill);
1793 			schedule_delayed_work(&priv->rf_kill,
1794 					      round_jiffies_relative(2 * HZ));
1795 		} else
1796 			schedule_work(&priv->up);
1797 	}
1798 
1799 	return 1;
1800 }
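/* Note on return values: ipw_radio_kill_sw() returns 0 when the requested
 * state already matches the current software rf-kill state (nothing to do),
 * and 1 when it flipped STATUS_RF_KILL_SW and scheduled the corresponding
 * down work, up work, or rf_kill poll (the latter when the hardware switch
 * still holds the radio off).
 */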
1801 
1802 static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr,
1803 			     const char *buf, size_t count)
1804 {
1805 	struct ipw_priv *priv = dev_get_drvdata(d);
1806 
1807 	ipw_radio_kill_sw(priv, buf[0] == '1');
1808 
1809 	return count;
1810 }
1811 
1812 static DEVICE_ATTR_RW(rf_kill);
1813 
1814 static ssize_t speed_scan_show(struct device *d, struct device_attribute *attr,
1815 			       char *buf)
1816 {
1817 	struct ipw_priv *priv = dev_get_drvdata(d);
1818 	int pos = 0, len = 0;
1819 	if (priv->config & CFG_SPEED_SCAN) {
1820 		while (priv->speed_scan[pos] != 0)
1821 			len += sprintf(&buf[len], "%d ",
1822 				       priv->speed_scan[pos++]);
1823 		return len + sprintf(&buf[len], "\n");
1824 	}
1825 
1826 	return sprintf(buf, "0\n");
1827 }
1828 
1829 static ssize_t speed_scan_store(struct device *d, struct device_attribute *attr,
1830 				const char *buf, size_t count)
1831 {
1832 	struct ipw_priv *priv = dev_get_drvdata(d);
1833 	int channel, pos = 0;
1834 	const char *p = buf;
1835 
1836 	/* list of space-separated channels to scan, optionally ending with 0 */
1837 	while ((channel = simple_strtol(p, NULL, 0))) {
1838 		if (pos == MAX_SPEED_SCAN - 1) {
1839 			priv->speed_scan[pos] = 0;
1840 			break;
1841 		}
1842 
1843 		if (libipw_is_valid_channel(priv->ieee, channel))
1844 			priv->speed_scan[pos++] = channel;
1845 		else
1846 			IPW_WARNING("Skipping invalid channel request: %d\n",
1847 				    channel);
1848 		p = strchr(p, ' ');
1849 		if (!p)
1850 			break;
1851 		while (*p == ' ' || *p == '\t')
1852 			p++;
1853 	}
1854 
1855 	if (pos == 0)
1856 		priv->config &= ~CFG_SPEED_SCAN;
1857 	else {
1858 		priv->speed_scan_pos = 0;
1859 		priv->config |= CFG_SPEED_SCAN;
1860 	}
1861 
1862 	return count;
1863 }
1864 
1865 static DEVICE_ATTR_RW(speed_scan);
1866 
1867 static ssize_t net_stats_show(struct device *d, struct device_attribute *attr,
1868 			      char *buf)
1869 {
1870 	struct ipw_priv *priv = dev_get_drvdata(d);
1871 	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1872 }
1873 
1874 static ssize_t net_stats_store(struct device *d, struct device_attribute *attr,
1875 			       const char *buf, size_t count)
1876 {
1877 	struct ipw_priv *priv = dev_get_drvdata(d);
1878 	if (buf[0] == '1')
1879 		priv->config |= CFG_NET_STATS;
1880 	else
1881 		priv->config &= ~CFG_NET_STATS;
1882 
1883 	return count;
1884 }
1885 
1886 static DEVICE_ATTR_RW(net_stats);
1887 
1888 static ssize_t channels_show(struct device *d,
1889 			     struct device_attribute *attr,
1890 			     char *buf)
1891 {
1892 	struct ipw_priv *priv = dev_get_drvdata(d);
1893 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1894 	int len = 0, i;
1895 
1896 	len = sprintf(&buf[len],
1897 		      "Displaying %d channels in 2.4GHz band "
1898 		      "(802.11bg):\n", geo->bg_channels);
1899 
1900 	for (i = 0; i < geo->bg_channels; i++) {
1901 		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1902 			       geo->bg[i].channel,
1903 			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1904 			       " (radar spectrum)" : "",
1905 			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1906 				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1907 			       ? "" : ", IBSS",
1908 			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1909 			       "passive only" : "active/passive",
1910 			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1911 			       "B" : "B/G");
1912 	}
1913 
1914 	len += sprintf(&buf[len],
1915 		       "Displaying %d channels in 5.2GHz band "
1916 		       "(802.11a):\n", geo->a_channels);
1917 	for (i = 0; i < geo->a_channels; i++) {
1918 		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1919 			       geo->a[i].channel,
1920 			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1921 			       " (radar spectrum)" : "",
1922 			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1923 				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1924 			       ? "" : ", IBSS",
1925 			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1926 			       "passive only" : "active/passive");
1927 	}
1928 
1929 	return len;
1930 }
1931 
1932 static DEVICE_ATTR_ADMIN_RO(channels);
1933 
1934 static void notify_wx_assoc_event(struct ipw_priv *priv)
1935 {
1936 	union iwreq_data wrqu;
1937 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1938 	if (priv->status & STATUS_ASSOCIATED)
1939 		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1940 	else
1941 		eth_zero_addr(wrqu.ap_addr.sa_data);
1942 	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1943 }
1944 
1945 static void ipw_irq_tasklet(struct tasklet_struct *t)
1946 {
1947 	struct ipw_priv *priv = from_tasklet(priv, t, irq_tasklet);
1948 	u32 inta, inta_mask, handled = 0;
1949 	unsigned long flags;
1950 
1951 	spin_lock_irqsave(&priv->irq_lock, flags);
1952 
1953 	inta = ipw_read32(priv, IPW_INTA_RW);
1954 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1955 
1956 	if (inta == 0xFFFFFFFF) {
1957 		/* Hardware disappeared */
1958 		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1959 		/* Only handle the cached INTA values */
1960 		inta = 0;
1961 	}
1962 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1963 
1964 	/* Add any cached INTA values that need to be handled */
1965 	inta |= priv->isr_inta;
1966 
1967 	spin_unlock_irqrestore(&priv->irq_lock, flags);
1968 
1969 	spin_lock_irqsave(&priv->lock, flags);
1970 
1971 	/* handle all the possible causes of the interrupt */
1972 	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1973 		ipw_rx(priv);
1974 		handled |= IPW_INTA_BIT_RX_TRANSFER;
1975 	}
1976 
1977 	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1978 		IPW_DEBUG_HC("Command completed.\n");
1979 		ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1980 		priv->status &= ~STATUS_HCMD_ACTIVE;
1981 		wake_up_interruptible(&priv->wait_command_queue);
1982 		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1983 	}
1984 
1985 	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1986 		IPW_DEBUG_TX("TX_QUEUE_1\n");
1987 		ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1988 		handled |= IPW_INTA_BIT_TX_QUEUE_1;
1989 	}
1990 
1991 	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1992 		IPW_DEBUG_TX("TX_QUEUE_2\n");
1993 		ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1994 		handled |= IPW_INTA_BIT_TX_QUEUE_2;
1995 	}
1996 
1997 	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1998 		IPW_DEBUG_TX("TX_QUEUE_3\n");
1999 		ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2000 		handled |= IPW_INTA_BIT_TX_QUEUE_3;
2001 	}
2002 
2003 	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2004 		IPW_DEBUG_TX("TX_QUEUE_4\n");
2005 		ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2006 		handled |= IPW_INTA_BIT_TX_QUEUE_4;
2007 	}
2008 
2009 	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2010 		IPW_WARNING("STATUS_CHANGE\n");
2011 		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2012 	}
2013 
2014 	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2015 		IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
2016 		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2017 	}
2018 
2019 	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2020 		IPW_WARNING("HOST_CMD_DONE\n");
2021 		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2022 	}
2023 
2024 	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2025 		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2026 		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2027 	}
2028 
2029 	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2030 		IPW_WARNING("PHY_OFF_DONE\n");
2031 		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2032 	}
2033 
2034 	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2035 		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2036 		priv->status |= STATUS_RF_KILL_HW;
2037 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2038 		wake_up_interruptible(&priv->wait_command_queue);
2039 		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2040 		cancel_delayed_work(&priv->request_scan);
2041 		cancel_delayed_work(&priv->request_direct_scan);
2042 		cancel_delayed_work(&priv->request_passive_scan);
2043 		cancel_delayed_work(&priv->scan_event);
2044 		schedule_work(&priv->link_down);
2045 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2046 		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2047 	}
2048 
2049 	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2050 		IPW_WARNING("Firmware error detected.  Restarting.\n");
2051 		if (priv->error) {
2052 			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2053 			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2054 				struct ipw_fw_error *error =
2055 				    ipw_alloc_error_log(priv);
2056 				ipw_dump_error_log(priv, error);
2057 				kfree(error);
2058 			}
2059 		} else {
2060 			priv->error = ipw_alloc_error_log(priv);
2061 			if (priv->error)
2062 				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2063 			else
2064 				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2065 					     "log.\n");
2066 			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2067 				ipw_dump_error_log(priv, priv->error);
2068 		}
2069 
2070 		/* XXX: If hardware encryption is for WPA/WPA2,
2071 		 * we have to notify the supplicant. */
2072 		if (priv->ieee->sec.encrypt) {
2073 			priv->status &= ~STATUS_ASSOCIATED;
2074 			notify_wx_assoc_event(priv);
2075 		}
2076 
2077 		/* Keep the restart process from trying to send host
2078 		 * commands by clearing the INIT status bit */
2079 		priv->status &= ~STATUS_INIT;
2080 
2081 		/* Cancel currently queued command. */
2082 		priv->status &= ~STATUS_HCMD_ACTIVE;
2083 		wake_up_interruptible(&priv->wait_command_queue);
2084 
2085 		schedule_work(&priv->adapter_restart);
2086 		handled |= IPW_INTA_BIT_FATAL_ERROR;
2087 	}
2088 
2089 	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2090 		IPW_ERROR("Parity error\n");
2091 		handled |= IPW_INTA_BIT_PARITY_ERROR;
2092 	}
2093 
2094 	if (handled != inta) {
2095 		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2096 	}
2097 
2098 	spin_unlock_irqrestore(&priv->lock, flags);
2099 
2100 	/* enable all interrupts */
2101 	ipw_enable_interrupts(priv);
2102 }
2103 
2104 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
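/*
 * IPW_CMD() relies on the preprocessor's token-pasting (##) and
 * stringizing (#) operators; for example, IPW_CMD(SSID) expands to
 *
 *	case IPW_CMD_SSID : return "SSID";
 *
 * which is how the switch below maps command ids to printable names.
 */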
2105 static char *get_cmd_string(u8 cmd)
2106 {
2107 	switch (cmd) {
2108 		IPW_CMD(HOST_COMPLETE);
2109 		IPW_CMD(POWER_DOWN);
2110 		IPW_CMD(SYSTEM_CONFIG);
2111 		IPW_CMD(MULTICAST_ADDRESS);
2112 		IPW_CMD(SSID);
2113 		IPW_CMD(ADAPTER_ADDRESS);
2114 		IPW_CMD(PORT_TYPE);
2115 		IPW_CMD(RTS_THRESHOLD);
2116 		IPW_CMD(FRAG_THRESHOLD);
2117 		IPW_CMD(POWER_MODE);
2118 		IPW_CMD(WEP_KEY);
2119 		IPW_CMD(TGI_TX_KEY);
2120 		IPW_CMD(SCAN_REQUEST);
2121 		IPW_CMD(SCAN_REQUEST_EXT);
2122 		IPW_CMD(ASSOCIATE);
2123 		IPW_CMD(SUPPORTED_RATES);
2124 		IPW_CMD(SCAN_ABORT);
2125 		IPW_CMD(TX_FLUSH);
2126 		IPW_CMD(QOS_PARAMETERS);
2127 		IPW_CMD(DINO_CONFIG);
2128 		IPW_CMD(RSN_CAPABILITIES);
2129 		IPW_CMD(RX_KEY);
2130 		IPW_CMD(CARD_DISABLE);
2131 		IPW_CMD(SEED_NUMBER);
2132 		IPW_CMD(TX_POWER);
2133 		IPW_CMD(COUNTRY_INFO);
2134 		IPW_CMD(AIRONET_INFO);
2135 		IPW_CMD(AP_TX_POWER);
2136 		IPW_CMD(CCKM_INFO);
2137 		IPW_CMD(CCX_VER_INFO);
2138 		IPW_CMD(SET_CALIBRATION);
2139 		IPW_CMD(SENSITIVITY_CALIB);
2140 		IPW_CMD(RETRY_LIMIT);
2141 		IPW_CMD(IPW_PRE_POWER_DOWN);
2142 		IPW_CMD(VAP_BEACON_TEMPLATE);
2143 		IPW_CMD(VAP_DTIM_PERIOD);
2144 		IPW_CMD(EXT_SUPPORTED_RATES);
2145 		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2146 		IPW_CMD(VAP_QUIET_INTERVALS);
2147 		IPW_CMD(VAP_CHANNEL_SWITCH);
2148 		IPW_CMD(VAP_MANDATORY_CHANNELS);
2149 		IPW_CMD(VAP_CELL_PWR_LIMIT);
2150 		IPW_CMD(VAP_CF_PARAM_SET);
2151 		IPW_CMD(VAP_SET_BEACONING_STATE);
2152 		IPW_CMD(MEASUREMENT);
2153 		IPW_CMD(POWER_CAPABILITY);
2154 		IPW_CMD(SUPPORTED_CHANNELS);
2155 		IPW_CMD(TPC_REPORT);
2156 		IPW_CMD(WME_INFO);
2157 		IPW_CMD(PRODUCTION_COMMAND);
2158 	default:
2159 		return "UNKNOWN";
2160 	}
2161 }
2162 
2163 #define HOST_COMPLETE_TIMEOUT HZ
2164 
2165 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2166 {
2167 	int rc = 0;
2168 	unsigned long flags;
2169 	unsigned long now, end;
2170 
2171 	spin_lock_irqsave(&priv->lock, flags);
2172 	if (priv->status & STATUS_HCMD_ACTIVE) {
2173 		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2174 			  get_cmd_string(cmd->cmd));
2175 		spin_unlock_irqrestore(&priv->lock, flags);
2176 		return -EAGAIN;
2177 	}
2178 
2179 	priv->status |= STATUS_HCMD_ACTIVE;
2180 
2181 	if (priv->cmdlog) {
2182 		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2183 		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2184 		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2185 		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2186 		       cmd->len);
2187 		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2188 	}
2189 
2190 	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2191 		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2192 		     priv->status);
2193 
2194 #ifndef DEBUG_CMD_WEP_KEY
2195 	if (cmd->cmd == IPW_CMD_WEP_KEY)
2196 		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2197 	else
2198 #endif
2199 		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2200 
2201 	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2202 	if (rc) {
2203 		priv->status &= ~STATUS_HCMD_ACTIVE;
2204 		IPW_ERROR("Failed to send %s: Reason %d\n",
2205 			  get_cmd_string(cmd->cmd), rc);
2206 		spin_unlock_irqrestore(&priv->lock, flags);
2207 		goto exit;
2208 	}
2209 	spin_unlock_irqrestore(&priv->lock, flags);
2210 
2211 	now = jiffies;
2212 	end = now + HOST_COMPLETE_TIMEOUT;
2213 again:
2214 	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2215 					      !(priv->
2216 						status & STATUS_HCMD_ACTIVE),
2217 					      end - now);
2218 	if (rc < 0) {
2219 		now = jiffies;
2220 		if (time_before(now, end))
2221 			goto again;
2222 		rc = 0;
2223 	}
2224 
2225 	if (rc == 0) {
2226 		spin_lock_irqsave(&priv->lock, flags);
2227 		if (priv->status & STATUS_HCMD_ACTIVE) {
2228 			IPW_ERROR("Failed to send %s: Command timed out.\n",
2229 				  get_cmd_string(cmd->cmd));
2230 			priv->status &= ~STATUS_HCMD_ACTIVE;
2231 			spin_unlock_irqrestore(&priv->lock, flags);
2232 			rc = -EIO;
2233 			goto exit;
2234 		}
2235 		spin_unlock_irqrestore(&priv->lock, flags);
2236 	} else
2237 		rc = 0;
2238 
2239 	if (priv->status & STATUS_RF_KILL_HW) {
2240 		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2241 			  get_cmd_string(cmd->cmd));
2242 		rc = -EIO;
2243 		goto exit;
2244 	}
2245 
2246       exit:
2247 	if (priv->cmdlog) {
2248 		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2249 		priv->cmdlog_pos %= priv->cmdlog_len;
2250 	}
2251 	return rc;
2252 }
2253 
2254 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2255 {
2256 	struct host_cmd cmd = {
2257 		.cmd = command,
2258 	};
2259 
2260 	return __ipw_send_cmd(priv, &cmd);
2261 }
2262 
2263 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2264 			    const void *data)
2265 {
2266 	struct host_cmd cmd = {
2267 		.cmd = command,
2268 		.len = len,
2269 		.param = data,
2270 	};
2271 
2272 	return __ipw_send_cmd(priv, &cmd);
2273 }
2274 
2275 static int ipw_send_host_complete(struct ipw_priv *priv)
2276 {
2277 	if (!priv) {
2278 		IPW_ERROR("Invalid args\n");
2279 		return -1;
2280 	}
2281 
2282 	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2283 }
2284 
2285 static int ipw_send_system_config(struct ipw_priv *priv)
2286 {
2287 	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2288 				sizeof(priv->sys_config),
2289 				&priv->sys_config);
2290 }
2291 
2292 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2293 {
2294 	if (!priv || !ssid) {
2295 		IPW_ERROR("Invalid args\n");
2296 		return -1;
2297 	}
2298 
2299 	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2300 				ssid);
2301 }
2302 
2303 static int ipw_send_adapter_address(struct ipw_priv *priv, const u8 * mac)
2304 {
2305 	if (!priv || !mac) {
2306 		IPW_ERROR("Invalid args\n");
2307 		return -1;
2308 	}
2309 
2310 	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2311 		       priv->net_dev->name, mac);
2312 
2313 	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2314 }
2315 
2316 static void ipw_adapter_restart(void *adapter)
2317 {
2318 	struct ipw_priv *priv = adapter;
2319 
2320 	if (priv->status & STATUS_RF_KILL_MASK)
2321 		return;
2322 
2323 	ipw_down(priv);
2324 
2325 	if (priv->assoc_network &&
2326 	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2327 		ipw_remove_current_network(priv);
2328 
2329 	if (ipw_up(priv)) {
2330 		IPW_ERROR("Failed to up device\n");
2331 		return;
2332 	}
2333 }
2334 
2335 static void ipw_bg_adapter_restart(struct work_struct *work)
2336 {
2337 	struct ipw_priv *priv =
2338 		container_of(work, struct ipw_priv, adapter_restart);
2339 	mutex_lock(&priv->mutex);
2340 	ipw_adapter_restart(priv);
2341 	mutex_unlock(&priv->mutex);
2342 }
2343 
2344 static void ipw_abort_scan(struct ipw_priv *priv);
2345 
2346 #define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
2347 
2348 static void ipw_scan_check(void *data)
2349 {
2350 	struct ipw_priv *priv = data;
2351 
2352 	if (priv->status & STATUS_SCAN_ABORTING) {
2353 		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2354 			       "adapter after (%dms).\n",
2355 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2356 		schedule_work(&priv->adapter_restart);
2357 	} else if (priv->status & STATUS_SCANNING) {
2358 		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2359 			       "after (%dms).\n",
2360 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2361 		ipw_abort_scan(priv);
2362 		schedule_delayed_work(&priv->scan_check, HZ);
2363 	}
2364 }
2365 
2366 static void ipw_bg_scan_check(struct work_struct *work)
2367 {
2368 	struct ipw_priv *priv =
2369 		container_of(work, struct ipw_priv, scan_check.work);
2370 	mutex_lock(&priv->mutex);
2371 	ipw_scan_check(priv);
2372 	mutex_unlock(&priv->mutex);
2373 }
2374 
2375 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2376 				     struct ipw_scan_request_ext *request)
2377 {
2378 	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2379 				sizeof(*request), request);
2380 }
2381 
2382 static int ipw_send_scan_abort(struct ipw_priv *priv)
2383 {
2384 	if (!priv) {
2385 		IPW_ERROR("Invalid args\n");
2386 		return -1;
2387 	}
2388 
2389 	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2390 }
2391 
2392 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2393 {
2394 	struct ipw_sensitivity_calib calib = {
2395 		.beacon_rssi_raw = cpu_to_le16(sens),
2396 	};
2397 
2398 	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2399 				&calib);
2400 }
2401 
2402 static int ipw_send_associate(struct ipw_priv *priv,
2403 			      struct ipw_associate *associate)
2404 {
2405 	if (!priv || !associate) {
2406 		IPW_ERROR("Invalid args\n");
2407 		return -1;
2408 	}
2409 
2410 	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2411 				associate);
2412 }
2413 
2414 static int ipw_send_supported_rates(struct ipw_priv *priv,
2415 				    struct ipw_supported_rates *rates)
2416 {
2417 	if (!priv || !rates) {
2418 		IPW_ERROR("Invalid args\n");
2419 		return -1;
2420 	}
2421 
2422 	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2423 				rates);
2424 }
2425 
2426 static int ipw_set_random_seed(struct ipw_priv *priv)
2427 {
2428 	u32 val;
2429 
2430 	if (!priv) {
2431 		IPW_ERROR("Invalid args\n");
2432 		return -1;
2433 	}
2434 
2435 	get_random_bytes(&val, sizeof(val));
2436 
2437 	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2438 }
2439 
2440 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2441 {
2442 	__le32 v = cpu_to_le32(phy_off);
2443 	if (!priv) {
2444 		IPW_ERROR("Invalid args\n");
2445 		return -1;
2446 	}
2447 
2448 	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2449 }
2450 
2451 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2452 {
2453 	if (!priv || !power) {
2454 		IPW_ERROR("Invalid args\n");
2455 		return -1;
2456 	}
2457 
2458 	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2459 }
2460 
2461 static int ipw_set_tx_power(struct ipw_priv *priv)
2462 {
2463 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2464 	struct ipw_tx_power tx_power;
2465 	s8 max_power;
2466 	int i;
2467 
2468 	memset(&tx_power, 0, sizeof(tx_power));
2469 
2470 	/* configure device for 'G' band */
2471 	tx_power.ieee_mode = IPW_G_MODE;
2472 	tx_power.num_channels = geo->bg_channels;
2473 	for (i = 0; i < geo->bg_channels; i++) {
2474 		max_power = geo->bg[i].max_power;
2475 		tx_power.channels_tx_power[i].channel_number =
2476 		    geo->bg[i].channel;
2477 		tx_power.channels_tx_power[i].tx_power = max_power ?
2478 		    min(max_power, priv->tx_power) : priv->tx_power;
2479 	}
2480 	if (ipw_send_tx_power(priv, &tx_power))
2481 		return -EIO;
2482 
2483 	/* configure device to also handle 'B' band */
2484 	tx_power.ieee_mode = IPW_B_MODE;
2485 	if (ipw_send_tx_power(priv, &tx_power))
2486 		return -EIO;
2487 
2488 	/* configure device to also handle 'A' band */
2489 	if (priv->ieee->abg_true) {
2490 		tx_power.ieee_mode = IPW_A_MODE;
2491 		tx_power.num_channels = geo->a_channels;
2492 		for (i = 0; i < tx_power.num_channels; i++) {
2493 			max_power = geo->a[i].max_power;
2494 			tx_power.channels_tx_power[i].channel_number =
2495 			    geo->a[i].channel;
2496 			tx_power.channels_tx_power[i].tx_power = max_power ?
2497 			    min(max_power, priv->tx_power) : priv->tx_power;
2498 		}
2499 		if (ipw_send_tx_power(priv, &tx_power))
2500 			return -EIO;
2501 	}
2502 	return 0;
2503 }
2504 
2505 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2506 {
2507 	struct ipw_rts_threshold rts_threshold = {
2508 		.rts_threshold = cpu_to_le16(rts),
2509 	};
2510 
2511 	if (!priv) {
2512 		IPW_ERROR("Invalid args\n");
2513 		return -1;
2514 	}
2515 
2516 	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2517 				sizeof(rts_threshold), &rts_threshold);
2518 }
2519 
2520 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2521 {
2522 	struct ipw_frag_threshold frag_threshold = {
2523 		.frag_threshold = cpu_to_le16(frag),
2524 	};
2525 
2526 	if (!priv) {
2527 		IPW_ERROR("Invalid args\n");
2528 		return -1;
2529 	}
2530 
2531 	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2532 				sizeof(frag_threshold), &frag_threshold);
2533 }
2534 
2535 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2536 {
2537 	__le32 param;
2538 
2539 	if (!priv) {
2540 		IPW_ERROR("Invalid args\n");
2541 		return -1;
2542 	}
2543 
2544 	/* If on battery, use power index 3; if on AC, use CAM; otherwise
2545 	 * pass the user-specified level through */
2546 	switch (mode) {
2547 	case IPW_POWER_BATTERY:
2548 		param = cpu_to_le32(IPW_POWER_INDEX_3);
2549 		break;
2550 	case IPW_POWER_AC:
2551 		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2552 		break;
2553 	default:
2554 		param = cpu_to_le32(mode);
2555 		break;
2556 	}
2557 
2558 	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2559 				&param);
2560 }
2561 
2562 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2563 {
2564 	struct ipw_retry_limit retry_limit = {
2565 		.short_retry_limit = slimit,
2566 		.long_retry_limit = llimit
2567 	};
2568 
2569 	if (!priv) {
2570 		IPW_ERROR("Invalid args\n");
2571 		return -1;
2572 	}
2573 
2574 	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2575 				&retry_limit);
2576 }
2577 
2578 /*
2579  * The IPW device contains a Microwire compatible EEPROM that stores
2580  * various data like the MAC address.  Usually the firmware has exclusive
2581  * access to the eeprom, but during device initialization (before the
2582  * device driver has sent the HostComplete command to the firmware) the
2583  * device driver has read access to the EEPROM by way of indirect addressing
2584  * through a couple of memory mapped registers.
2585  *
2586  * The following is a simplified implementation for pulling data out of
2587  * the eeprom, along with some helper functions to find information in
2588  * the per device private data's copy of the eeprom.
2589  *
2590  * NOTE: To better understand how these functions work (i.e. what is a chip
2591  *       select and why do we have to keep driving the eeprom clock?), read
2592  *       just about any data sheet for a Microwire compatible EEPROM.
2593  */
2594 
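/*
 * Illustrative sketch: the Microwire READ transaction bit-banged by
 * eeprom_op() and eeprom_read_u16() below consists of a start bit, a
 * 2-bit opcode and an 8-bit address clocked out MSB first, followed by
 * 16 data bits clocked back in.  The helper below (hypothetical, not
 * used by the driver) only shows how that 11-bit command word is laid
 * out, assuming EEPROM_CMD_READ holds the raw 2-bit READ opcode as it
 * is fed to eeprom_op(); the real register writes and timing are
 * handled by eeprom_write_reg() and friends.
 */
static inline u16 eeprom_example_read_cmd(u8 addr)
{
	/* [start = 1][opcode bits 9:8][address bits 7:0] */
	return (1 << 10) | (EEPROM_CMD_READ << 8) | addr;
}
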
2595 /* write a 32 bit value into the indirect accessor register */
2596 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2597 {
2598 	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2599 
2600 	/* the eeprom requires some time to complete the operation */
2601 	udelay(p->eeprom_delay);
2602 }
2603 
2604 /* perform a chip select operation */
2605 static void eeprom_cs(struct ipw_priv *priv)
2606 {
2607 	eeprom_write_reg(priv, 0);
2608 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2609 	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2610 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2611 }
2612 
2613 /* deassert the chip select */
2614 static void eeprom_disable_cs(struct ipw_priv *priv)
2615 {
2616 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2617 	eeprom_write_reg(priv, 0);
2618 	eeprom_write_reg(priv, EEPROM_BIT_SK);
2619 }
2620 
2621 /* push a single bit down to the eeprom */
2622 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2623 {
2624 	int d = (bit ? EEPROM_BIT_DI : 0);
2625 	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2626 	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2627 }
2628 
2629 /* push an opcode followed by an address down to the eeprom */
2630 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2631 {
2632 	int i;
2633 
2634 	eeprom_cs(priv);
2635 	eeprom_write_bit(priv, 1);
2636 	eeprom_write_bit(priv, op & 2);
2637 	eeprom_write_bit(priv, op & 1);
2638 	for (i = 7; i >= 0; i--) {
2639 		eeprom_write_bit(priv, addr & (1 << i));
2640 	}
2641 }
2642 
2643 /* pull 16 bits off the eeprom, one bit at a time */
2644 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2645 {
2646 	int i;
2647 	u16 r = 0;
2648 
2649 	/* Send READ Opcode */
2650 	eeprom_op(priv, EEPROM_CMD_READ, addr);
2651 
2652 	/* Send dummy bit */
2653 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2654 
2655 	/* Read the 16-bit word off the eeprom one bit at a time */
2656 	for (i = 0; i < 16; i++) {
2657 		u32 data = 0;
2658 		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2659 		eeprom_write_reg(priv, EEPROM_BIT_CS);
2660 		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2661 		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2662 	}
2663 
2664 	/* Send another dummy bit */
2665 	eeprom_write_reg(priv, 0);
2666 	eeprom_disable_cs(priv);
2667 
2668 	return r;
2669 }
2670 
2671 /* helper function for pulling the mac address out of the private */
2672 /* data's copy of the eeprom data                                 */
2673 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2674 {
2675 	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2676 }
2677 
2678 static void ipw_read_eeprom(struct ipw_priv *priv)
2679 {
2680 	int i;
2681 	__le16 *eeprom = (__le16 *) priv->eeprom;
2682 
2683 	IPW_DEBUG_TRACE(">>\n");
2684 
2685 	/* read entire contents of eeprom into private buffer */
2686 	for (i = 0; i < 128; i++)
2687 		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2688 
2689 	IPW_DEBUG_TRACE("<<\n");
2690 }
2691 
2692 /*
2693  * Either the device driver (i.e. the host) or the firmware can
2694  * load eeprom data into the designated region in SRAM.  If neither
2695  * happens then the FW will shut down with a fatal error.
2696  *
2697  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2698  * region of shared SRAM needs to be non-zero.
2699  */
2700 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2701 {
2702 	int i;
2703 
2704 	IPW_DEBUG_TRACE(">>\n");
2705 
2706 	/*
2707 	   If our private copy of the EEPROM data looks valid, write it
2708 	   into the shared SRAM region.  Otherwise let the firmware load
2709 	   the EEPROM on its own.
2710 	 */
2711 	if (priv->eeprom[EEPROM_VERSION] != 0) {
2712 		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2713 
2714 		/* write the eeprom data to sram */
2715 		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2716 			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2717 
2718 		/* Do not load eeprom data on fatal error or suspend */
2719 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2720 	} else {
2721 		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2722 
2723 		/* Load eeprom data on fatal error or suspend */
2724 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2725 	}
2726 
2727 	IPW_DEBUG_TRACE("<<\n");
2728 }
2729 
2730 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2731 {
2732 	count >>= 2;
2733 	if (!count)
2734 		return;
2735 	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2736 	while (count--)
2737 		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2738 }
2739 
2740 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2741 {
2742 	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2743 			CB_NUMBER_OF_ELEMENTS_SMALL *
2744 			sizeof(struct command_block));
2745 }
2746 
2747 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2748 {				/* start dma engine but no transfers yet */
2749 
2750 	IPW_DEBUG_FW(">> :\n");
2751 
2752 	/* Start the dma */
2753 	ipw_fw_dma_reset_command_blocks(priv);
2754 
2755 	/* Write CB base address */
2756 	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2757 
2758 	IPW_DEBUG_FW("<< :\n");
2759 	return 0;
2760 }
2761 
2762 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2763 {
2764 	u32 control = 0;
2765 
2766 	IPW_DEBUG_FW(">> :\n");
2767 
2768 	/* set the Stop and Abort bit */
2769 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2770 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2771 	priv->sram_desc.last_cb_index = 0;
2772 
2773 	IPW_DEBUG_FW("<<\n");
2774 }
2775 
2776 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2777 					  struct command_block *cb)
2778 {
2779 	u32 address =
2780 	    IPW_SHARED_SRAM_DMA_CONTROL +
2781 	    (sizeof(struct command_block) * index);
2782 	IPW_DEBUG_FW(">> :\n");
2783 
2784 	ipw_write_indirect(priv, address, (u8 *) cb,
2785 			   (int)sizeof(struct command_block));
2786 
2787 	IPW_DEBUG_FW("<< :\n");
2788 	return 0;
2789 
2790 }
2791 
2792 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2793 {
2794 	u32 control = 0;
2795 	u32 index = 0;
2796 
2797 	IPW_DEBUG_FW(">> :\n");
2798 
2799 	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2800 		ipw_fw_dma_write_command_block(priv, index,
2801 					       &priv->sram_desc.cb_list[index]);
2802 
2803 	/* Enable the DMA in the CSR register */
2804 	ipw_clear_bit(priv, IPW_RESET_REG,
2805 		      IPW_RESET_REG_MASTER_DISABLED |
2806 		      IPW_RESET_REG_STOP_MASTER);
2807 
2808 	/* Set the Start bit. */
2809 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2810 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2811 
2812 	IPW_DEBUG_FW("<< :\n");
2813 	return 0;
2814 }
2815 
2816 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2817 {
2818 	u32 address;
2819 	u32 register_value = 0;
2820 	u32 cb_fields_address = 0;
2821 
2822 	IPW_DEBUG_FW(">> :\n");
2823 	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2824 	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2825 
2826 	/* Read the DMA Control register */
2827 	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2828 	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2829 
2830 	/* Print the CB values */
2831 	cb_fields_address = address;
2832 	register_value = ipw_read_reg32(priv, cb_fields_address);
2833 	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2834 
2835 	cb_fields_address += sizeof(u32);
2836 	register_value = ipw_read_reg32(priv, cb_fields_address);
2837 	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2838 
2839 	cb_fields_address += sizeof(u32);
2840 	register_value = ipw_read_reg32(priv, cb_fields_address);
2841 	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2842 			  register_value);
2843 
2844 	cb_fields_address += sizeof(u32);
2845 	register_value = ipw_read_reg32(priv, cb_fields_address);
2846 	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2847 
2848 	IPW_DEBUG_FW("<< :\n");
2849 }
2850 
2851 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2852 {
2853 	u32 current_cb_address = 0;
2854 	u32 current_cb_index = 0;
2855 
2856 	IPW_DEBUG_FW(">> :\n");
2857 	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2858 
2859 	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2860 	    sizeof(struct command_block);
2861 
2862 	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2863 			  current_cb_index, current_cb_address);
2864 
2865 	IPW_DEBUG_FW("<< :\n");
2866 	return current_cb_index;
2867 
2868 }
2869 
2870 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2871 					u32 src_address,
2872 					u32 dest_address,
2873 					u32 length,
2874 					int interrupt_enabled, int is_last)
2875 {
2876 
2877 	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2878 	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2879 	    CB_DEST_SIZE_LONG;
2880 	struct command_block *cb;
2881 	u32 last_cb_element = 0;
2882 
2883 	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2884 			  src_address, dest_address, length);
2885 
2886 	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2887 		return -1;
2888 
2889 	last_cb_element = priv->sram_desc.last_cb_index;
2890 	cb = &priv->sram_desc.cb_list[last_cb_element];
2891 	priv->sram_desc.last_cb_index++;
2892 
2893 	/* Calculate the new CB control word */
2894 	if (interrupt_enabled)
2895 		control |= CB_INT_ENABLED;
2896 
2897 	if (is_last)
2898 		control |= CB_LAST_VALID;
2899 
2900 	control |= length;
2901 
2902 	/* Calculate the CB Element's checksum value */
2903 	cb->status = control ^ src_address ^ dest_address;
2904 
2905 	/* Copy the Source and Destination addresses */
2906 	cb->dest_addr = dest_address;
2907 	cb->source_addr = src_address;
2908 
2909 	/* Copy the Control Word last */
2910 	cb->control = control;
2911 
2912 	return 0;
2913 }
2914 
2915 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2916 				 int nr, u32 dest_address, u32 len)
2917 {
2918 	int ret, i;
2919 	u32 size;
2920 
2921 	IPW_DEBUG_FW(">>\n");
2922 	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2923 			  nr, dest_address, len);
2924 
2925 	for (i = 0; i < nr; i++) {
2926 		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2927 		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2928 						   dest_address +
2929 						   i * CB_MAX_LENGTH, size,
2930 						   0, 0);
2931 		if (ret) {
2932 			IPW_DEBUG_FW_INFO(": Failed\n");
2933 			return -1;
2934 		} else
2935 			IPW_DEBUG_FW_INFO(": Added new cb\n");
2936 	}
2937 
2938 	IPW_DEBUG_FW("<<\n");
2939 	return 0;
2940 }
2941 
2942 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2943 {
2944 	u32 current_index = 0, previous_index;
2945 	u32 watchdog = 0;
2946 
2947 	IPW_DEBUG_FW(">> :\n");
2948 
2949 	current_index = ipw_fw_dma_command_block_index(priv);
2950 	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2951 			  (int)priv->sram_desc.last_cb_index);
2952 
2953 	while (current_index < priv->sram_desc.last_cb_index) {
2954 		udelay(50);
2955 		previous_index = current_index;
2956 		current_index = ipw_fw_dma_command_block_index(priv);
2957 
2958 		if (previous_index < current_index) {
2959 			watchdog = 0;
2960 			continue;
2961 		}
2962 		if (++watchdog > 400) {
2963 			IPW_DEBUG_FW_INFO("Timeout\n");
2964 			ipw_fw_dma_dump_command_block(priv);
2965 			ipw_fw_dma_abort(priv);
2966 			return -1;
2967 		}
2968 	}
2969 
2970 	ipw_fw_dma_abort(priv);
2971 
2972 	/* Disable the DMA in the CSR register */
2973 	ipw_set_bit(priv, IPW_RESET_REG,
2974 		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2975 
2976 	IPW_DEBUG_FW("<< dmaWaitSync\n");
2977 	return 0;
2978 }
2979 
2980 static void ipw_remove_current_network(struct ipw_priv *priv)
2981 {
2982 	struct list_head *element, *safe;
2983 	struct libipw_network *network = NULL;
2984 	unsigned long flags;
2985 
2986 	spin_lock_irqsave(&priv->ieee->lock, flags);
2987 	list_for_each_safe(element, safe, &priv->ieee->network_list) {
2988 		network = list_entry(element, struct libipw_network, list);
2989 		if (ether_addr_equal(network->bssid, priv->bssid)) {
2990 			list_del(element);
2991 			list_add_tail(&network->list,
2992 				      &priv->ieee->network_free_list);
2993 		}
2994 	}
2995 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
2996 }
2997 
2998 /*
2999  * Check that card is still alive.
3000  * Reads debug register from domain0.
3001  * If the card is present, a pre-defined value should
3002  * be found there.
3003  *
3004  * @param priv
3005  * @return 1 if card is present, 0 otherwise
3006  */
3007 static inline int ipw_alive(struct ipw_priv *priv)
3008 {
3009 	return ipw_read32(priv, 0x90) == 0xd55555d5;
3010 }
3011 
3012 /* timeout in msec, attempted in 10-msec quanta */
3013 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3014 			       int timeout)
3015 {
3016 	int i = 0;
3017 
3018 	do {
3019 		if ((ipw_read32(priv, addr) & mask) == mask)
3020 			return i;
3021 		mdelay(10);
3022 		i += 10;
3023 	} while (i < timeout);
3024 
3025 	return -ETIME;
3026 }
3027 
3028 /* These functions load the firmware and micro code for the operation of
3029  * the ipw hardware.  They assume the buffer has all the bits for the
3030  * image and that the caller is handling the memory allocation and cleanup.
3031  */
3032 
3033 static int ipw_stop_master(struct ipw_priv *priv)
3034 {
3035 	int rc;
3036 
3037 	IPW_DEBUG_TRACE(">>\n");
3038 	/* stop master. typical delay - 0 */
3039 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3040 
3041 	/* timeout is in msec, polled in 10-msec quanta */
3042 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3043 			  IPW_RESET_REG_MASTER_DISABLED, 100);
3044 	if (rc < 0) {
3045 		IPW_ERROR("wait for stop master failed after 100ms\n");
3046 		return -1;
3047 	}
3048 
3049 	IPW_DEBUG_INFO("stop master %dms\n", rc);
3050 
3051 	return rc;
3052 }
3053 
3054 static void ipw_arc_release(struct ipw_priv *priv)
3055 {
3056 	IPW_DEBUG_TRACE(">>\n");
3057 	mdelay(5);
3058 
3059 	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3060 
3061 	/* the exact timing is not documented, so for safety add some delay */
3062 	mdelay(5);
3063 }
3064 
3065 struct fw_chunk {
3066 	__le32 address;
3067 	__le32 length;
3068 };
3069 
3070 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3071 {
3072 	int rc = 0, i, addr;
3073 	u8 cr = 0;
3074 	__le16 *image;
3075 
3076 	image = (__le16 *) data;
3077 
3078 	IPW_DEBUG_TRACE(">>\n");
3079 
3080 	rc = ipw_stop_master(priv);
3081 
3082 	if (rc < 0)
3083 		return rc;
3084 
3085 	for (addr = IPW_SHARED_LOWER_BOUND;
3086 	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3087 		ipw_write32(priv, addr, 0);
3088 	}
3089 
3090 	/* no ucode (yet) */
3091 	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3092 	/* destroy DMA queues */
3093 	/* reset sequence */
3094 
3095 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3096 	ipw_arc_release(priv);
3097 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3098 	mdelay(1);
3099 
3100 	/* reset PHY */
3101 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3102 	mdelay(1);
3103 
3104 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3105 	mdelay(1);
3106 
3107 	/* enable ucode store */
3108 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3109 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3110 	mdelay(1);
3111 
3112 	/* write ucode */
3113 	/*
3114 	 * @bug
3115 	 * Do NOT set indirect address register once and then
3116 	 * store data to indirect data register in the loop.
3117 	 * It seems very reasonable, but in that case DINO does not
3118 	 * accept the ucode.  It is essential to set the address each time.
3119 	 */
3120 	/* load new ipw uCode */
3121 	for (i = 0; i < len / 2; i++)
3122 		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3123 				le16_to_cpu(image[i]));
3124 
3125 	/* enable DINO */
3126 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3127 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3128 
3129 	/* this is where the igx / win driver deviates from the VAP driver. */
3130 
3131 	/* wait for alive response */
3132 	for (i = 0; i < 100; i++) {
3133 		/* poll for incoming data */
3134 		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3135 		if (cr & DINO_RXFIFO_DATA)
3136 			break;
3137 		mdelay(1);
3138 	}
3139 
3140 	if (cr & DINO_RXFIFO_DATA) {
3141 		/* alive_command_response size is NOT a multiple of 4 */
3142 		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3143 
3144 		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3145 			response_buffer[i] =
3146 			    cpu_to_le32(ipw_read_reg32(priv,
3147 						       IPW_BASEBAND_RX_FIFO_READ));
3148 		memcpy(&priv->dino_alive, response_buffer,
3149 		       sizeof(priv->dino_alive));
3150 		if (priv->dino_alive.alive_command == 1
3151 		    && priv->dino_alive.ucode_valid == 1) {
3152 			rc = 0;
3153 			IPW_DEBUG_INFO
3154 			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3155 			     "of %02d/%02d/%02d %02d:%02d\n",
3156 			     priv->dino_alive.software_revision,
3157 			     priv->dino_alive.software_revision,
3158 			     priv->dino_alive.device_identifier,
3159 			     priv->dino_alive.device_identifier,
3160 			     priv->dino_alive.time_stamp[0],
3161 			     priv->dino_alive.time_stamp[1],
3162 			     priv->dino_alive.time_stamp[2],
3163 			     priv->dino_alive.time_stamp[3],
3164 			     priv->dino_alive.time_stamp[4]);
3165 		} else {
3166 			IPW_DEBUG_INFO("Microcode is not alive\n");
3167 			rc = -EINVAL;
3168 		}
3169 	} else {
3170 		IPW_DEBUG_INFO("No alive response from DINO\n");
3171 		rc = -ETIME;
3172 	}
3173 
3174 	/* disable DINO, otherwise for some reason the
3175 	   firmware has problems getting the alive response */
3176 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3177 
3178 	return rc;
3179 }
3180 
3181 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3182 {
3183 	int ret = -1;
3184 	int offset = 0;
3185 	struct fw_chunk *chunk;
3186 	int total_nr = 0;
3187 	int i;
3188 	struct dma_pool *pool;
3189 	void **virts;
3190 	dma_addr_t *phys;
3191 
3192 	IPW_DEBUG_TRACE(">> :\n");
3193 
3194 	virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *),
3195 			      GFP_KERNEL);
3196 	if (!virts)
3197 		return -ENOMEM;
3198 
3199 	phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t),
3200 			     GFP_KERNEL);
3201 	if (!phys) {
3202 		kfree(virts);
3203 		return -ENOMEM;
3204 	}
3205 	pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
3206 			       0);
3207 	if (!pool) {
3208 		IPW_ERROR("dma_pool_create failed\n");
3209 		kfree(phys);
3210 		kfree(virts);
3211 		return -ENOMEM;
3212 	}
3213 
3214 	/* Start the Dma */
3215 	ret = ipw_fw_dma_enable(priv);
3216 
3217 	/* if the DMA is already active, this would be a bug */
3218 	BUG_ON(priv->sram_desc.last_cb_index > 0);
3219 
3220 	do {
3221 		u32 chunk_len;
3222 		u8 *start;
3223 		int size;
3224 		int nr = 0;
3225 
3226 		chunk = (struct fw_chunk *)(data + offset);
3227 		offset += sizeof(struct fw_chunk);
3228 		chunk_len = le32_to_cpu(chunk->length);
3229 		start = data + offset;
3230 
3231 		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3232 		for (i = 0; i < nr; i++) {
3233 			virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
3234 							 &phys[total_nr]);
3235 			if (!virts[total_nr]) {
3236 				ret = -ENOMEM;
3237 				goto out;
3238 			}
3239 			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3240 				     CB_MAX_LENGTH);
3241 			memcpy(virts[total_nr], start, size);
3242 			start += size;
3243 			total_nr++;
3244 			/* We don't support fw chunks larger than 64*8K */
3245 			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3246 		}
3247 
3248 		/* build DMA packet and queue up for sending */
3249 		/* dma to chunk->address, the chunk->length bytes from data +
3250 		 * offset */
3251 		/* Dma loading */
3252 		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3253 					    nr, le32_to_cpu(chunk->address),
3254 					    chunk_len);
3255 		if (ret) {
3256 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3257 			goto out;
3258 		}
3259 
3260 		offset += chunk_len;
3261 	} while (offset < len);
3262 
3263 	/* Run the DMA and wait for the answer */
3264 	ret = ipw_fw_dma_kick(priv);
3265 	if (ret) {
3266 		IPW_ERROR("dmaKick Failed\n");
3267 		goto out;
3268 	}
3269 
3270 	ret = ipw_fw_dma_wait(priv);
3271 	if (ret) {
3272 		IPW_ERROR("dmaWaitSync Failed\n");
3273 		goto out;
3274 	}
3275  out:
3276 	for (i = 0; i < total_nr; i++)
3277 		dma_pool_free(pool, virts[i], phys[i]);
3278 
3279 	dma_pool_destroy(pool);
3280 	kfree(phys);
3281 	kfree(virts);
3282 
3283 	return ret;
3284 }
3285 
3286 /* stop nic */
3287 static int ipw_stop_nic(struct ipw_priv *priv)
3288 {
3289 	int rc = 0;
3290 
3291 	/* stop */
3292 	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3293 
3294 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3295 			  IPW_RESET_REG_MASTER_DISABLED, 500);
3296 	if (rc < 0) {
3297 		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3298 		return rc;
3299 	}
3300 
3301 	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3302 
3303 	return rc;
3304 }
3305 
3306 static void ipw_start_nic(struct ipw_priv *priv)
3307 {
3308 	IPW_DEBUG_TRACE(">>\n");
3309 
3310 	/* prvHwStartNic  release ARC */
3311 	ipw_clear_bit(priv, IPW_RESET_REG,
3312 		      IPW_RESET_REG_MASTER_DISABLED |
3313 		      IPW_RESET_REG_STOP_MASTER |
3314 		      CBD_RESET_REG_PRINCETON_RESET);
3315 
3316 	/* enable power management */
3317 	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3318 		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3319 
3320 	IPW_DEBUG_TRACE("<<\n");
3321 }
3322 
3323 static int ipw_init_nic(struct ipw_priv *priv)
3324 {
3325 	int rc;
3326 
3327 	IPW_DEBUG_TRACE(">>\n");
3328 	/* reset */
3329 	/* prvHwInitNic */
3330 	/* set "initialization complete" bit to move adapter to D0 state */
3331 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3332 
3333 	/* low-level PLL activation */
3334 	ipw_write32(priv, IPW_READ_INT_REGISTER,
3335 		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3336 
3337 	/* wait for clock stabilization */
3338 	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3339 			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3340 	if (rc < 0)
3341 		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3342 
3343 	/* assert SW reset */
3344 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3345 
3346 	udelay(10);
3347 
3348 	/* set "initialization complete" bit to move adapter to D0 state */
3349 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3350 
3351 	IPW_DEBUG_TRACE("<<\n");
3352 	return 0;
3353 }
3354 
3355 /* Call this function from process context, it will sleep in request_firmware.
3356  * Probe is an ok place to call this from.
3357  */
3358 static int ipw_reset_nic(struct ipw_priv *priv)
3359 {
3360 	int rc = 0;
3361 	unsigned long flags;
3362 
3363 	IPW_DEBUG_TRACE(">>\n");
3364 
3365 	rc = ipw_init_nic(priv);
3366 
3367 	spin_lock_irqsave(&priv->lock, flags);
3368 	/* Clear the 'host command active' bit... */
3369 	priv->status &= ~STATUS_HCMD_ACTIVE;
3370 	wake_up_interruptible(&priv->wait_command_queue);
3371 	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3372 	wake_up_interruptible(&priv->wait_state);
3373 	spin_unlock_irqrestore(&priv->lock, flags);
3374 
3375 	IPW_DEBUG_TRACE("<<\n");
3376 	return rc;
3377 }
3378 
3379 
3380 struct ipw_fw {
3381 	__le32 ver;
3382 	__le32 boot_size;
3383 	__le32 ucode_size;
3384 	__le32 fw_size;
3385 	u8 data[];
3386 };
3387 
3388 static int ipw_get_fw(struct ipw_priv *priv,
3389 		      const struct firmware **raw, const char *name)
3390 {
3391 	struct ipw_fw *fw;
3392 	int rc;
3393 
3394 	/* ask firmware_class module to get the boot firmware off disk */
3395 	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3396 	if (rc < 0) {
3397 		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3398 		return rc;
3399 	}
3400 
3401 	if ((*raw)->size < sizeof(*fw)) {
3402 		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3403 		return -EINVAL;
3404 	}
3405 
3406 	fw = (void *)(*raw)->data;
3407 
3408 	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3409 	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3410 		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3411 			  name, (*raw)->size);
3412 		return -EINVAL;
3413 	}
3414 
3415 	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3416 		       name,
3417 		       le32_to_cpu(fw->ver) >> 16,
3418 		       le32_to_cpu(fw->ver) & 0xff,
3419 		       (*raw)->size - sizeof(*fw));
3420 	return 0;
3421 }
3422 
3423 #define IPW_RX_BUF_SIZE (3000)
3424 
3425 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3426 				      struct ipw_rx_queue *rxq)
3427 {
3428 	unsigned long flags;
3429 	int i;
3430 
3431 	spin_lock_irqsave(&rxq->lock, flags);
3432 
3433 	INIT_LIST_HEAD(&rxq->rx_free);
3434 	INIT_LIST_HEAD(&rxq->rx_used);
3435 
3436 	/* Fill the rx_used queue with _all_ of the Rx buffers */
3437 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3438 		/* In the reset function, these buffers may have been allocated
3439 		 * to an SKB, so we need to unmap and free potential storage */
3440 		if (rxq->pool[i].skb != NULL) {
3441 			dma_unmap_single(&priv->pci_dev->dev,
3442 					 rxq->pool[i].dma_addr,
3443 					 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
3444 			dev_kfree_skb(rxq->pool[i].skb);
3445 			rxq->pool[i].skb = NULL;
3446 		}
3447 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3448 	}
3449 
3450 	/* Set us so that we have processed and used all buffers, but have
3451 	 * not restocked the Rx queue with fresh buffers */
3452 	rxq->read = rxq->write = 0;
3453 	rxq->free_count = 0;
3454 	spin_unlock_irqrestore(&rxq->lock, flags);
3455 }
3456 
3457 #ifdef CONFIG_PM
3458 static int fw_loaded = 0;
3459 static const struct firmware *raw = NULL;
3460 
3461 static void free_firmware(void)
3462 {
3463 	if (fw_loaded) {
3464 		release_firmware(raw);
3465 		raw = NULL;
3466 		fw_loaded = 0;
3467 	}
3468 }
3469 #else
3470 #define free_firmware() do {} while (0)
3471 #endif
3472 
3473 static int ipw_load(struct ipw_priv *priv)
3474 {
3475 #ifndef CONFIG_PM
3476 	const struct firmware *raw = NULL;
3477 #endif
3478 	struct ipw_fw *fw;
3479 	u8 *boot_img, *ucode_img, *fw_img;
3480 	u8 *name = NULL;
3481 	int rc = 0, retries = 3;
3482 
3483 	switch (priv->ieee->iw_mode) {
3484 	case IW_MODE_ADHOC:
3485 		name = "ipw2200-ibss.fw";
3486 		break;
3487 #ifdef CONFIG_IPW2200_MONITOR
3488 	case IW_MODE_MONITOR:
3489 		name = "ipw2200-sniffer.fw";
3490 		break;
3491 #endif
3492 	case IW_MODE_INFRA:
3493 		name = "ipw2200-bss.fw";
3494 		break;
3495 	}
3496 
3497 	if (!name) {
3498 		rc = -EINVAL;
3499 		goto error;
3500 	}
3501 
3502 #ifdef CONFIG_PM
3503 	if (!fw_loaded) {
3504 #endif
3505 		rc = ipw_get_fw(priv, &raw, name);
3506 		if (rc < 0)
3507 			goto error;
3508 #ifdef CONFIG_PM
3509 	}
3510 #endif
3511 
3512 	fw = (void *)raw->data;
3513 	boot_img = &fw->data[0];
3514 	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3515 	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3516 			   le32_to_cpu(fw->ucode_size)];
3517 
3518 	if (!priv->rxq)
3519 		priv->rxq = ipw_rx_queue_alloc(priv);
3520 	else
3521 		ipw_rx_queue_reset(priv, priv->rxq);
3522 	if (!priv->rxq) {
3523 		IPW_ERROR("Unable to initialize Rx queue\n");
3524 		rc = -ENOMEM;
3525 		goto error;
3526 	}
3527 
3528       retry:
3529 	/* Ensure interrupts are disabled */
3530 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3531 	priv->status &= ~STATUS_INT_ENABLED;
3532 
3533 	/* ack pending interrupts */
3534 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3535 
3536 	ipw_stop_nic(priv);
3537 
3538 	rc = ipw_reset_nic(priv);
3539 	if (rc < 0) {
3540 		IPW_ERROR("Unable to reset NIC\n");
3541 		goto error;
3542 	}
3543 
3544 	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3545 			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3546 
3547 	/* DMA the initial boot firmware into the device */
3548 	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3549 	if (rc < 0) {
3550 		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3551 		goto error;
3552 	}
3553 
3554 	/* kick start the device */
3555 	ipw_start_nic(priv);
3556 
3557 	/* wait for the device to finish its initial startup sequence */
3558 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3559 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3560 	if (rc < 0) {
3561 		IPW_ERROR("device failed to boot initial fw image\n");
3562 		goto error;
3563 	}
3564 	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3565 
3566 	/* ack fw init done interrupt */
3567 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3568 
3569 	/* DMA the ucode into the device */
3570 	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3571 	if (rc < 0) {
3572 		IPW_ERROR("Unable to load ucode: %d\n", rc);
3573 		goto error;
3574 	}
3575 
3576 	/* stop nic */
3577 	ipw_stop_nic(priv);
3578 
3579 	/* DMA bss firmware into the device */
3580 	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3581 	if (rc < 0) {
3582 		IPW_ERROR("Unable to load firmware: %d\n", rc);
3583 		goto error;
3584 	}
3585 #ifdef CONFIG_PM
3586 	fw_loaded = 1;
3587 #endif
3588 
3589 	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3590 
3591 	rc = ipw_queue_reset(priv);
3592 	if (rc < 0) {
3593 		IPW_ERROR("Unable to initialize queues\n");
3594 		goto error;
3595 	}
3596 
3597 	/* Ensure interrupts are disabled */
3598 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3599 	/* ack pending interrupts */
3600 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3601 
3602 	/* kick start the device */
3603 	ipw_start_nic(priv);
3604 
3605 	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3606 		if (retries > 0) {
3607 			IPW_WARNING("Parity error.  Retrying init.\n");
3608 			retries--;
3609 			goto retry;
3610 		}
3611 
3612 		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3613 		rc = -EIO;
3614 		goto error;
3615 	}
3616 
3617 	/* wait for the device */
3618 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3619 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3620 	if (rc < 0) {
3621 		IPW_ERROR("device failed to start within 500ms\n");
3622 		goto error;
3623 	}
3624 	IPW_DEBUG_INFO("device response after %dms\n", rc);
3625 
3626 	/* ack fw init done interrupt */
3627 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3628 
3629 	/* read eeprom data */
3630 	priv->eeprom_delay = 1;
3631 	ipw_read_eeprom(priv);
3632 	/* initialize the eeprom region of sram */
3633 	ipw_eeprom_init_sram(priv);
3634 
3635 	/* enable interrupts */
3636 	ipw_enable_interrupts(priv);
3637 
3638 	/* Ensure our queue has valid packets */
3639 	ipw_rx_queue_replenish(priv);
3640 
3641 	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3642 
3643 	/* ack pending interrupts */
3644 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3645 
3646 #ifndef CONFIG_PM
3647 	release_firmware(raw);
3648 #endif
3649 	return 0;
3650 
3651       error:
3652 	if (priv->rxq) {
3653 		ipw_rx_queue_free(priv, priv->rxq);
3654 		priv->rxq = NULL;
3655 	}
3656 	ipw_tx_queue_free(priv);
3657 	release_firmware(raw);
3658 #ifdef CONFIG_PM
3659 	fw_loaded = 0;
3660 	raw = NULL;
3661 #endif
3662 
3663 	return rc;
3664 }
3665 
3666 /*
3667  * DMA services
3668  *
3669  * Theory of operation
3670  *
3671  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3672  * Two empty entries are always kept in the buffer to protect from overflow.
3673  *
3674  * For the Tx queues there are low mark and high mark limits.  If, after
3675  * queuing a packet for Tx, the free space drops below the low mark, the Tx
3676  * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the
3677  * free space rises above the high mark, the Tx queue is resumed.
3678  *
3679  * The IPW operates with six queues, one receive queue in the device's
3680  * sram, one transmit queue for sending commands to the device firmware,
3681  * and four transmit queues for data.
3682  *
3683  * The four transmit queues allow for performing quality of service (qos)
3684  * transmissions as per the 802.11 protocol.  Currently Linux does not
3685  * provide a mechanism to the user for utilizing prioritized queues, so
3686  * we only utilize the first data transmit queue (queue1).
3687  */
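
     /* For illustration: the 64-entry data queues created in ipw_queue_reset()
      * below expose at most 62 usable TFDs each, since two slots always stay
      * empty so that a full queue is never mistaken for an empty one. */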
3688 
3689 /*
3690  * Driver allocates buffers of this size for Rx
3691  */
3692 
3693 /*
3694  * ipw_rx_queue_space - Return number of free slots available in queue.
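      *
      * Two slots are always left unused so a full ring is not confused with an
      * empty one.  For example, with RX_QUEUE_SIZE = 32, read = 4 and write = 10
      * give s = -6 + 32 = 26, minus the two reserved slots = 24 free entries.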
3695  */
3696 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3697 {
3698 	int s = q->read - q->write;
3699 	if (s <= 0)
3700 		s += RX_QUEUE_SIZE;
3701 	/* keep some buffer to not confuse full and empty queue */
3702 	s -= 2;
3703 	if (s < 0)
3704 		s = 0;
3705 	return s;
3706 }
3707 
3708 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3709 {
3710 	int s = q->last_used - q->first_empty;
3711 	if (s <= 0)
3712 		s += q->n_bd;
3713 	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3714 	if (s < 0)
3715 		s = 0;
3716 	return s;
3717 }
3718 
3719 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3720 {
3721 	return (++index == n_bd) ? 0 : index;
3722 }
3723 
3724 /*
3725  * Initialize common DMA queue structure
3726  *
3727  * @param q                queue to init
3728  * @param count            Number of BDs to allocate. Should be a power of 2
3729  * @param read             Address for 'read' register
3730  *                         (not offset within BAR, full address)
3731  * @param write            Address for 'write' register
3732  *                         (not offset within BAR, full address)
3733  * @param base             Address for 'base' register
3734  *                         (not offset within BAR, full address)
3735  * @param size             Address for 'size' register
3736  *                         (not offset within BAR, full address)
3737  */
3738 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3739 			   int count, u32 read, u32 write, u32 base, u32 size)
3740 {
3741 	q->n_bd = count;
3742 
3743 	q->low_mark = q->n_bd / 4;
3744 	if (q->low_mark < 4)
3745 		q->low_mark = 4;
3746 
3747 	q->high_mark = q->n_bd / 8;
3748 	if (q->high_mark < 2)
3749 		q->high_mark = 2;
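     	/* e.g. the 64-entry data queues set up in ipw_queue_reset() end up
     	 * with low_mark = 16 and high_mark = 8 */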
3750 
3751 	q->first_empty = q->last_used = 0;
3752 	q->reg_r = read;
3753 	q->reg_w = write;
3754 
3755 	ipw_write32(priv, base, q->dma_addr);
3756 	ipw_write32(priv, size, count);
3757 	ipw_write32(priv, read, 0);
3758 	ipw_write32(priv, write, 0);
3759 
3760 	_ipw_read32(priv, 0x90);
3761 }
3762 
3763 static int ipw_queue_tx_init(struct ipw_priv *priv,
3764 			     struct clx2_tx_queue *q,
3765 			     int count, u32 read, u32 write, u32 base, u32 size)
3766 {
3767 	struct pci_dev *dev = priv->pci_dev;
3768 
3769 	q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
3770 	if (!q->txb)
3771 		return -ENOMEM;
3772 
3773 	q->bd =
3774 	    dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
3775 			       &q->q.dma_addr, GFP_KERNEL);
3776 	if (!q->bd) {
3777 		IPW_ERROR("dma_alloc_coherent(%zd) failed\n",
3778 			  sizeof(q->bd[0]) * count);
3779 		kfree(q->txb);
3780 		q->txb = NULL;
3781 		return -ENOMEM;
3782 	}
3783 
3784 	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3785 	return 0;
3786 }
3787 
3788 /*
3789  * Free one TFD, the one at index [txq->q.last_used].
3790  * Do NOT advance any indexes
3791  *
3792  * @param priv
3793  * @param txq
3794  */
3795 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3796 				  struct clx2_tx_queue *txq)
3797 {
3798 	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3799 	struct pci_dev *dev = priv->pci_dev;
3800 	int i;
3801 
3802 	/* classify bd */
3803 	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3804 		/* nothing to cleanup after for host commands */
3805 		return;
3806 
3807 	/* sanity check */
3808 	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3809 		IPW_ERROR("Too many chunks: %i\n",
3810 			  le32_to_cpu(bd->u.data.num_chunks));
3811 		/* @todo issue a fatal error, this is quite a serious situation */
3812 		return;
3813 	}
3814 
3815 	/* unmap chunks if any */
3816 	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3817 		dma_unmap_single(&dev->dev,
3818 				 le32_to_cpu(bd->u.data.chunk_ptr[i]),
3819 				 le16_to_cpu(bd->u.data.chunk_len[i]),
3820 				 DMA_TO_DEVICE);
3821 		if (txq->txb[txq->q.last_used]) {
3822 			libipw_txb_free(txq->txb[txq->q.last_used]);
3823 			txq->txb[txq->q.last_used] = NULL;
3824 		}
3825 	}
3826 }
3827 
3828 /*
3829  * Deallocate DMA queue.
3830  *
3831  * Empty the queue by removing and destroying all BDs.
3832  * Free all buffers.
3833  *
3834  * @param priv
3835  * @param txq
3836  */
3837 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3838 {
3839 	struct clx2_queue *q = &txq->q;
3840 	struct pci_dev *dev = priv->pci_dev;
3841 
3842 	if (q->n_bd == 0)
3843 		return;
3844 
3845 	/* first, empty all BD's */
3846 	for (; q->first_empty != q->last_used;
3847 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3848 		ipw_queue_tx_free_tfd(priv, txq);
3849 	}
3850 
3851 	/* free buffers belonging to queue itself */
3852 	dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3853 			  q->dma_addr);
3854 	kfree(txq->txb);
3855 
3856 	/* 0 fill whole structure */
3857 	memset(txq, 0, sizeof(*txq));
3858 }
3859 
3860 /*
3861  * Destroy all DMA queues and structures
3862  *
3863  * @param priv
3864  */
3865 static void ipw_tx_queue_free(struct ipw_priv *priv)
3866 {
3867 	/* Tx CMD queue */
3868 	ipw_queue_tx_free(priv, &priv->txq_cmd);
3869 
3870 	/* Tx queues */
3871 	ipw_queue_tx_free(priv, &priv->txq[0]);
3872 	ipw_queue_tx_free(priv, &priv->txq[1]);
3873 	ipw_queue_tx_free(priv, &priv->txq[2]);
3874 	ipw_queue_tx_free(priv, &priv->txq[3]);
3875 }
3876 
3877 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3878 {
3879 	/* First 3 bytes are manufacturer */
3880 	bssid[0] = priv->mac_addr[0];
3881 	bssid[1] = priv->mac_addr[1];
3882 	bssid[2] = priv->mac_addr[2];
3883 
3884 	/* Last bytes are random */
3885 	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3886 
3887 	bssid[0] &= 0xfe;	/* clear multicast bit */
3888 	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
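     	/* e.g. a (hypothetical) adapter MAC of 00:0E:35:12:34:56 would yield an
     	 * ad-hoc BSSID of the form 02:0E:35:xx:xx:xx with random tail bytes */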
3889 }
3890 
3891 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3892 {
3893 	struct ipw_station_entry entry;
3894 	int i;
3895 
3896 	for (i = 0; i < priv->num_stations; i++) {
3897 		if (ether_addr_equal(priv->stations[i], bssid)) {
3898 			/* Another node is active in network */
3899 			priv->missed_adhoc_beacons = 0;
3900 			if (!(priv->config & CFG_STATIC_CHANNEL))
3901 				/* when other nodes drop out, we drop out */
3902 				priv->config &= ~CFG_ADHOC_PERSIST;
3903 
3904 			return i;
3905 		}
3906 	}
3907 
3908 	if (i == MAX_STATIONS)
3909 		return IPW_INVALID_STATION;
3910 
3911 	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3912 
3913 	entry.reserved = 0;
3914 	entry.support_mode = 0;
3915 	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3916 	memcpy(priv->stations[i], bssid, ETH_ALEN);
3917 	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3918 			 &entry, sizeof(entry));
3919 	priv->num_stations++;
3920 
3921 	return i;
3922 }
3923 
3924 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3925 {
3926 	int i;
3927 
3928 	for (i = 0; i < priv->num_stations; i++)
3929 		if (ether_addr_equal(priv->stations[i], bssid))
3930 			return i;
3931 
3932 	return IPW_INVALID_STATION;
3933 }
3934 
3935 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3936 {
3937 	int err;
3938 
3939 	if (priv->status & STATUS_ASSOCIATING) {
3940 		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3941 		schedule_work(&priv->disassociate);
3942 		return;
3943 	}
3944 
3945 	if (!(priv->status & STATUS_ASSOCIATED)) {
3946 		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3947 		return;
3948 	}
3949 
3950 	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3951 			"on channel %d.\n",
3952 			priv->assoc_request.bssid,
3953 			priv->assoc_request.channel);
3954 
3955 	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3956 	priv->status |= STATUS_DISASSOCIATING;
3957 
3958 	if (quiet)
3959 		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3960 	else
3961 		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3962 
3963 	err = ipw_send_associate(priv, &priv->assoc_request);
3964 	if (err) {
3965 		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3966 			     "failed.\n");
3967 		return;
3968 	}
3969 
3970 }
3971 
3972 static int ipw_disassociate(void *data)
3973 {
3974 	struct ipw_priv *priv = data;
3975 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3976 		return 0;
3977 	ipw_send_disassociate(data, 0);
3978 	netif_carrier_off(priv->net_dev);
3979 	return 1;
3980 }
3981 
3982 static void ipw_bg_disassociate(struct work_struct *work)
3983 {
3984 	struct ipw_priv *priv =
3985 		container_of(work, struct ipw_priv, disassociate);
3986 	mutex_lock(&priv->mutex);
3987 	ipw_disassociate(priv);
3988 	mutex_unlock(&priv->mutex);
3989 }
3990 
3991 static void ipw_system_config(struct work_struct *work)
3992 {
3993 	struct ipw_priv *priv =
3994 		container_of(work, struct ipw_priv, system_config);
3995 
3996 #ifdef CONFIG_IPW2200_PROMISCUOUS
3997 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3998 		priv->sys_config.accept_all_data_frames = 1;
3999 		priv->sys_config.accept_non_directed_frames = 1;
4000 		priv->sys_config.accept_all_mgmt_bcpr = 1;
4001 		priv->sys_config.accept_all_mgmt_frames = 1;
4002 	}
4003 #endif
4004 
4005 	ipw_send_system_config(priv);
4006 }
4007 
4008 struct ipw_status_code {
4009 	u16 status;
4010 	const char *reason;
4011 };
4012 
4013 static const struct ipw_status_code ipw_status_codes[] = {
4014 	{0x00, "Successful"},
4015 	{0x01, "Unspecified failure"},
4016 	{0x0A, "Cannot support all requested capabilities in the "
4017 	 "Capability information field"},
4018 	{0x0B, "Reassociation denied due to inability to confirm that "
4019 	 "association exists"},
4020 	{0x0C, "Association denied due to reason outside the scope of this "
4021 	 "standard"},
4022 	{0x0D,
4023 	 "Responding station does not support the specified authentication "
4024 	 "algorithm"},
4025 	{0x0E,
4026 	 "Received an Authentication frame with authentication sequence "
4027 	 "transaction sequence number out of expected sequence"},
4028 	{0x0F, "Authentication rejected because of challenge failure"},
4029 	{0x10, "Authentication rejected due to timeout waiting for next "
4030 	 "frame in sequence"},
4031 	{0x11, "Association denied because AP is unable to handle additional "
4032 	 "associated stations"},
4033 	{0x12,
4034 	 "Association denied due to requesting station not supporting all "
4035 	 "of the datarates in the BSSBasicServiceSet Parameter"},
4036 	{0x13,
4037 	 "Association denied due to requesting station not supporting "
4038 	 "short preamble operation"},
4039 	{0x14,
4040 	 "Association denied due to requesting station not supporting "
4041 	 "PBCC encoding"},
4042 	{0x15,
4043 	 "Association denied due to requesting station not supporting "
4044 	 "channel agility"},
4045 	{0x19,
4046 	 "Association denied due to requesting station not supporting "
4047 	 "short slot operation"},
4048 	{0x1A,
4049 	 "Association denied due to requesting station not supporting "
4050 	 "DSSS-OFDM operation"},
4051 	{0x28, "Invalid Information Element"},
4052 	{0x29, "Group Cipher is not valid"},
4053 	{0x2A, "Pairwise Cipher is not valid"},
4054 	{0x2B, "AKMP is not valid"},
4055 	{0x2C, "Unsupported RSN IE version"},
4056 	{0x2D, "Invalid RSN IE Capabilities"},
4057 	{0x2E, "Cipher suite is rejected per security policy"},
4058 };
4059 
4060 static const char *ipw_get_status_code(u16 status)
4061 {
4062 	int i;
4063 	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4064 		if (ipw_status_codes[i].status == (status & 0xff))
4065 			return ipw_status_codes[i].reason;
4066 	return "Unknown status value.";
4067 }
4068 
4069 static inline void average_init(struct average *avg)
4070 {
4071 	memset(avg, 0, sizeof(*avg));
4072 }
4073 
4074 #define DEPTH_RSSI 8
4075 #define DEPTH_NOISE 16
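
     /* exponential_average() is a simple IIR filter in which each new sample is
      * weighted 1/depth.  For example, with DEPTH_RSSI = 8, prev_avg = -60 and
      * val = -52, the new average is (7 * (-60) + (-52)) / 8 = -59. */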
4076 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4077 {
4078 	return ((depth-1)*prev_avg +  val)/depth;
4079 }
4080 
4081 static void average_add(struct average *avg, s16 val)
4082 {
4083 	avg->sum -= avg->entries[avg->pos];
4084 	avg->sum += val;
4085 	avg->entries[avg->pos++] = val;
4086 	if (unlikely(avg->pos == AVG_ENTRIES)) {
4087 		avg->init = 1;
4088 		avg->pos = 0;
4089 	}
4090 }
4091 
4092 static s16 average_value(struct average *avg)
4093 {
4094 	if (!unlikely(avg->init)) {
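     		/* ring not yet full: average only over the samples collected so far */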
4095 		if (avg->pos)
4096 			return avg->sum / avg->pos;
4097 		return 0;
4098 	}
4099 
4100 	return avg->sum / AVG_ENTRIES;
4101 }
4102 
4103 static void ipw_reset_stats(struct ipw_priv *priv)
4104 {
4105 	u32 len = sizeof(u32);
4106 
4107 	priv->quality = 0;
4108 
4109 	average_init(&priv->average_missed_beacons);
4110 	priv->exp_avg_rssi = -60;
4111 	priv->exp_avg_noise = -85 + 0x100;
4112 
4113 	priv->last_rate = 0;
4114 	priv->last_missed_beacons = 0;
4115 	priv->last_rx_packets = 0;
4116 	priv->last_tx_packets = 0;
4117 	priv->last_tx_failures = 0;
4118 
4119 	/* Firmware managed, reset only when NIC is restarted, so we have to
4120 	 * normalize on the current value */
4121 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4122 			&priv->last_rx_err, &len);
4123 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4124 			&priv->last_tx_failures, &len);
4125 
4126 	/* Driver managed, reset with each association */
4127 	priv->missed_adhoc_beacons = 0;
4128 	priv->missed_beacons = 0;
4129 	priv->tx_packets = 0;
4130 	priv->rx_packets = 0;
4131 
4132 }
4133 
4134 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4135 {
4136 	u32 i = 0x80000000;
4137 	u32 mask = priv->rates_mask;
4138 	/* If currently associated in B mode, restrict the maximum
4139 	 * rate match to B rates */
4140 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4141 		mask &= LIBIPW_CCK_RATES_MASK;
4142 
4143 	/* TODO: Verify that the rate is supported by the current rates
4144 	 * list. */
4145 
4146 	while (i && !(mask & i))
4147 		i >>= 1;
4148 	switch (i) {
4149 	case LIBIPW_CCK_RATE_1MB_MASK:
4150 		return 1000000;
4151 	case LIBIPW_CCK_RATE_2MB_MASK:
4152 		return 2000000;
4153 	case LIBIPW_CCK_RATE_5MB_MASK:
4154 		return 5500000;
4155 	case LIBIPW_OFDM_RATE_6MB_MASK:
4156 		return 6000000;
4157 	case LIBIPW_OFDM_RATE_9MB_MASK:
4158 		return 9000000;
4159 	case LIBIPW_CCK_RATE_11MB_MASK:
4160 		return 11000000;
4161 	case LIBIPW_OFDM_RATE_12MB_MASK:
4162 		return 12000000;
4163 	case LIBIPW_OFDM_RATE_18MB_MASK:
4164 		return 18000000;
4165 	case LIBIPW_OFDM_RATE_24MB_MASK:
4166 		return 24000000;
4167 	case LIBIPW_OFDM_RATE_36MB_MASK:
4168 		return 36000000;
4169 	case LIBIPW_OFDM_RATE_48MB_MASK:
4170 		return 48000000;
4171 	case LIBIPW_OFDM_RATE_54MB_MASK:
4172 		return 54000000;
4173 	}
4174 
4175 	if (priv->ieee->mode == IEEE_B)
4176 		return 11000000;
4177 	else
4178 		return 54000000;
4179 }
4180 
4181 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4182 {
4183 	u32 rate, len = sizeof(rate);
4184 	int err;
4185 
4186 	if (!(priv->status & STATUS_ASSOCIATED))
4187 		return 0;
4188 
4189 	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4190 		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4191 				      &len);
4192 		if (err) {
4193 			IPW_DEBUG_INFO("failed querying ordinals.\n");
4194 			return 0;
4195 		}
4196 	} else
4197 		return ipw_get_max_rate(priv);
4198 
4199 	switch (rate) {
4200 	case IPW_TX_RATE_1MB:
4201 		return 1000000;
4202 	case IPW_TX_RATE_2MB:
4203 		return 2000000;
4204 	case IPW_TX_RATE_5MB:
4205 		return 5500000;
4206 	case IPW_TX_RATE_6MB:
4207 		return 6000000;
4208 	case IPW_TX_RATE_9MB:
4209 		return 9000000;
4210 	case IPW_TX_RATE_11MB:
4211 		return 11000000;
4212 	case IPW_TX_RATE_12MB:
4213 		return 12000000;
4214 	case IPW_TX_RATE_18MB:
4215 		return 18000000;
4216 	case IPW_TX_RATE_24MB:
4217 		return 24000000;
4218 	case IPW_TX_RATE_36MB:
4219 		return 36000000;
4220 	case IPW_TX_RATE_48MB:
4221 		return 48000000;
4222 	case IPW_TX_RATE_54MB:
4223 		return 54000000;
4224 	}
4225 
4226 	return 0;
4227 }
4228 
4229 #define IPW_STATS_INTERVAL (2 * HZ)
4230 static void ipw_gather_stats(struct ipw_priv *priv)
4231 {
4232 	u32 rx_err, rx_err_delta, rx_packets_delta;
4233 	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4234 	u32 missed_beacons_percent, missed_beacons_delta;
4235 	u32 quality = 0;
4236 	u32 len = sizeof(u32);
4237 	s16 rssi;
4238 	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4239 	    rate_quality;
4240 	u32 max_rate;
4241 
4242 	if (!(priv->status & STATUS_ASSOCIATED)) {
4243 		priv->quality = 0;
4244 		return;
4245 	}
4246 
4247 	/* Update the statistics */
4248 	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4249 			&priv->missed_beacons, &len);
4250 	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4251 	priv->last_missed_beacons = priv->missed_beacons;
4252 	if (priv->assoc_request.beacon_interval) {
4253 		missed_beacons_percent = missed_beacons_delta *
4254 		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4255 		    (IPW_STATS_INTERVAL * 10);
4256 	} else {
4257 		missed_beacons_percent = 0;
4258 	}
4259 	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4260 
4261 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4262 	rx_err_delta = rx_err - priv->last_rx_err;
4263 	priv->last_rx_err = rx_err;
4264 
4265 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4266 	tx_failures_delta = tx_failures - priv->last_tx_failures;
4267 	priv->last_tx_failures = tx_failures;
4268 
4269 	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4270 	priv->last_rx_packets = priv->rx_packets;
4271 
4272 	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4273 	priv->last_tx_packets = priv->tx_packets;
4274 
4275 	/* Calculate quality based on the following:
4276 	 *
4277 	 * Missed beacon: 100% = 0, 0% = 70% missed
4278 	 * Rate: 60% = 1Mbs, 100% = Max
4279 	 * Rx and Tx errors represent a straight % of total Rx/Tx
4280 	 * RSSI: 100% = > -50,  0% = < -80
4281 	 * Rx errors: 100% = 0, 0% = 50% missed
4282 	 *
4283 	 * The lowest computed quality is used.
4284 	 *
4285 	 */
4286 #define BEACON_THRESHOLD 5
4287 	beacon_quality = 100 - missed_beacons_percent;
4288 	if (beacon_quality < BEACON_THRESHOLD)
4289 		beacon_quality = 0;
4290 	else
4291 		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4292 		    (100 - BEACON_THRESHOLD);
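     	/* e.g. 3% missed beacons -> beacon_quality = (97 - 5) * 100 / 95 = 96% */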
4293 	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4294 			beacon_quality, missed_beacons_percent);
4295 
4296 	priv->last_rate = ipw_get_current_rate(priv);
4297 	max_rate = ipw_get_max_rate(priv);
4298 	rate_quality = priv->last_rate * 40 / max_rate + 60;
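     	/* maps 1 Mbps..max_rate onto roughly 60..100%; e.g. 11 Mbps out of a
     	 * 54 Mbps maximum gives 11 * 40 / 54 + 60 = 68% */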
4299 	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4300 			rate_quality, priv->last_rate / 1000000);
4301 
4302 	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4303 		rx_quality = 100 - (rx_err_delta * 100) /
4304 		    (rx_packets_delta + rx_err_delta);
4305 	else
4306 		rx_quality = 100;
4307 	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4308 			rx_quality, rx_err_delta, rx_packets_delta);
4309 
4310 	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4311 		tx_quality = 100 - (tx_failures_delta * 100) /
4312 		    (tx_packets_delta + tx_failures_delta);
4313 	else
4314 		tx_quality = 100;
4315 	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4316 			tx_quality, tx_failures_delta, tx_packets_delta);
4317 
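     	/* With p = perfect_rssi, w = worst_rssi and r = the averaged rssi, the
     	 * expression below computes
     	 *   (100 * (p - w)^2 - (p - r) * (15 * (p - w) + 62 * (p - r))) / (p - w)^2
     	 * and the result is then clamped to the 0..100 range. */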
4318 	rssi = priv->exp_avg_rssi;
4319 	signal_quality =
4320 	    (100 *
4321 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4322 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4323 	     (priv->ieee->perfect_rssi - rssi) *
4324 	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4325 	      62 * (priv->ieee->perfect_rssi - rssi))) /
4326 	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4327 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4328 	if (signal_quality > 100)
4329 		signal_quality = 100;
4330 	else if (signal_quality < 1)
4331 		signal_quality = 0;
4332 
4333 	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4334 			signal_quality, rssi);
4335 
4336 	quality = min(rx_quality, signal_quality);
4337 	quality = min(tx_quality, quality);
4338 	quality = min(rate_quality, quality);
4339 	quality = min(beacon_quality, quality);
4340 	if (quality == beacon_quality)
4341 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4342 				quality);
4343 	if (quality == rate_quality)
4344 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4345 				quality);
4346 	if (quality == tx_quality)
4347 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4348 				quality);
4349 	if (quality == rx_quality)
4350 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4351 				quality);
4352 	if (quality == signal_quality)
4353 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4354 				quality);
4355 
4356 	priv->quality = quality;
4357 
4358 	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4359 }
4360 
4361 static void ipw_bg_gather_stats(struct work_struct *work)
4362 {
4363 	struct ipw_priv *priv =
4364 		container_of(work, struct ipw_priv, gather_stats.work);
4365 	mutex_lock(&priv->mutex);
4366 	ipw_gather_stats(priv);
4367 	mutex_unlock(&priv->mutex);
4368 }
4369 
4370 /* Missed beacon behavior:
4371  * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4372  * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4373  * Above disassociate threshold, give up and stop scanning.
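      * For example, assuming roaming_threshold = 8 and disassociate_threshold = 24
      * (assumed defaults; both are tunable), misses up to 8 are only logged,
      * 9-24 trigger roam scans, and anything above 24 forces a disassociation.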
4374  * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
4375 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4376 					    int missed_count)
4377 {
4378 	priv->notif_missed_beacons = missed_count;
4379 
4380 	if (missed_count > priv->disassociate_threshold &&
4381 	    priv->status & STATUS_ASSOCIATED) {
4382 		/* If associated and we've hit the missed
4383 		 * beacon threshold, disassociate, turn
4384 		 * off roaming, and abort any active scans */
4385 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4386 			  IPW_DL_STATE | IPW_DL_ASSOC,
4387 			  "Missed beacon: %d - disassociate\n", missed_count);
4388 		priv->status &= ~STATUS_ROAMING;
4389 		if (priv->status & STATUS_SCANNING) {
4390 			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4391 				  IPW_DL_STATE,
4392 				  "Aborting scan with missed beacon.\n");
4393 			schedule_work(&priv->abort_scan);
4394 		}
4395 
4396 		schedule_work(&priv->disassociate);
4397 		return;
4398 	}
4399 
4400 	if (priv->status & STATUS_ROAMING) {
4401 		/* If we are currently roaming, then just
4402 		 * print a debug statement... */
4403 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4404 			  "Missed beacon: %d - roam in progress\n",
4405 			  missed_count);
4406 		return;
4407 	}
4408 
4409 	if (roaming &&
4410 	    (missed_count > priv->roaming_threshold &&
4411 	     missed_count <= priv->disassociate_threshold)) {
4412 		/* If we are not already roaming, set the ROAM
4413 		 * bit in the status and kick off a scan.
4414 		 * This can happen several times before we reach
4415 		 * disassociate_threshold. */
4416 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4417 			  "Missed beacon: %d - initiate "
4418 			  "roaming\n", missed_count);
4419 		if (!(priv->status & STATUS_ROAMING)) {
4420 			priv->status |= STATUS_ROAMING;
4421 			if (!(priv->status & STATUS_SCANNING))
4422 				schedule_delayed_work(&priv->request_scan, 0);
4423 		}
4424 		return;
4425 	}
4426 
4427 	if (priv->status & STATUS_SCANNING &&
4428 	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4429 		/* Stop scan to keep fw from getting
4430 		 * stuck (only if we aren't roaming --
4431 		 * otherwise we'll never scan more than 2 or 3
4432 		 * channels..) */
4433 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4434 			  "Aborting scan with missed beacon.\n");
4435 		schedule_work(&priv->abort_scan);
4436 	}
4437 
4438 	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4439 }
4440 
4441 static void ipw_scan_event(struct work_struct *work)
4442 {
4443 	union iwreq_data wrqu;
4444 
4445 	struct ipw_priv *priv =
4446 		container_of(work, struct ipw_priv, scan_event.work);
4447 
4448 	wrqu.data.length = 0;
4449 	wrqu.data.flags = 0;
4450 	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4451 }
4452 
4453 static void handle_scan_event(struct ipw_priv *priv)
4454 {
4455 	/* Only userspace-requested scan completion events go out immediately */
4456 	if (!priv->user_requested_scan) {
4457 		schedule_delayed_work(&priv->scan_event,
4458 				      round_jiffies_relative(msecs_to_jiffies(4000)));
4459 	} else {
4460 		priv->user_requested_scan = 0;
4461 		mod_delayed_work(system_wq, &priv->scan_event, 0);
4462 	}
4463 }
4464 
4465 /*
4466  * Handle host notification packet.
4467  * Called from interrupt routine
4468  */
4469 static void ipw_rx_notification(struct ipw_priv *priv,
4470 				       struct ipw_rx_notification *notif)
4471 {
4472 	u16 size = le16_to_cpu(notif->size);
4473 
4474 	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4475 
4476 	switch (notif->subtype) {
4477 	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4478 			struct notif_association *assoc = &notif->u.assoc;
4479 
4480 			switch (assoc->state) {
4481 			case CMAS_ASSOCIATED:{
4482 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4483 						  IPW_DL_ASSOC,
4484 						  "associated: '%*pE' %pM\n",
4485 						  priv->essid_len, priv->essid,
4486 						  priv->bssid);
4487 
4488 					switch (priv->ieee->iw_mode) {
4489 					case IW_MODE_INFRA:
4490 						memcpy(priv->ieee->bssid,
4491 						       priv->bssid, ETH_ALEN);
4492 						break;
4493 
4494 					case IW_MODE_ADHOC:
4495 						memcpy(priv->ieee->bssid,
4496 						       priv->bssid, ETH_ALEN);
4497 
4498 						/* clear out the station table */
4499 						priv->num_stations = 0;
4500 
4501 						IPW_DEBUG_ASSOC
4502 						    ("queueing adhoc check\n");
4503 						schedule_delayed_work(
4504 							&priv->adhoc_check,
4505 							le16_to_cpu(priv->
4506 							assoc_request.
4507 							beacon_interval));
4508 						break;
4509 					}
4510 
4511 					priv->status &= ~STATUS_ASSOCIATING;
4512 					priv->status |= STATUS_ASSOCIATED;
4513 					schedule_work(&priv->system_config);
4514 
4515 #ifdef CONFIG_IPW2200_QOS
4516 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4517 			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4518 					if ((priv->status & STATUS_AUTH) &&
4519 					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4520 					     == IEEE80211_STYPE_ASSOC_RESP)) {
4521 						if ((sizeof
4522 						     (struct
4523 						      libipw_assoc_response)
4524 						     <= size)
4525 						    && (size <= 2314)) {
4526 							struct
4527 							libipw_rx_stats
4528 							    stats = {
4529 								.len = size - 1,
4530 							};
4531 
4532 							IPW_DEBUG_QOS
4533 							    ("QoS Associate "
4534 							     "size %d\n", size);
4535 							libipw_rx_mgt(priv->
4536 									 ieee,
4537 									 (struct
4538 									  libipw_hdr_4addr
4539 									  *)
4540 									 &notif->u.raw, &stats);
4541 						}
4542 					}
4543 #endif
4544 
4545 					schedule_work(&priv->link_up);
4546 
4547 					break;
4548 				}
4549 
4550 			case CMAS_AUTHENTICATED:{
4551 					if (priv->
4552 					    status & (STATUS_ASSOCIATED |
4553 						      STATUS_AUTH)) {
4554 						struct notif_authenticate *auth
4555 						    = &notif->u.auth;
4556 						IPW_DEBUG(IPW_DL_NOTIF |
4557 							  IPW_DL_STATE |
4558 							  IPW_DL_ASSOC,
4559 							  "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4560 							  priv->essid_len,
4561 							  priv->essid,
4562 							  priv->bssid,
4563 							  le16_to_cpu(auth->status),
4564 							  ipw_get_status_code
4565 							  (le16_to_cpu
4566 							   (auth->status)));
4567 
4568 						priv->status &=
4569 						    ~(STATUS_ASSOCIATING |
4570 						      STATUS_AUTH |
4571 						      STATUS_ASSOCIATED);
4572 
4573 						schedule_work(&priv->link_down);
4574 						break;
4575 					}
4576 
4577 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4578 						  IPW_DL_ASSOC,
4579 						  "authenticated: '%*pE' %pM\n",
4580 						  priv->essid_len, priv->essid,
4581 						  priv->bssid);
4582 					break;
4583 				}
4584 
4585 			case CMAS_INIT:{
4586 					if (priv->status & STATUS_AUTH) {
4587 						struct
4588 						    libipw_assoc_response
4589 						*resp;
4590 						resp =
4591 						    (struct
4592 						     libipw_assoc_response
4593 						     *)&notif->u.raw;
4594 						IPW_DEBUG(IPW_DL_NOTIF |
4595 							  IPW_DL_STATE |
4596 							  IPW_DL_ASSOC,
4597 							  "association failed (0x%04X): %s\n",
4598 							  le16_to_cpu(resp->status),
4599 							  ipw_get_status_code
4600 							  (le16_to_cpu
4601 							   (resp->status)));
4602 					}
4603 
4604 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4605 						  IPW_DL_ASSOC,
4606 						  "disassociated: '%*pE' %pM\n",
4607 						  priv->essid_len, priv->essid,
4608 						  priv->bssid);
4609 
4610 					priv->status &=
4611 					    ~(STATUS_DISASSOCIATING |
4612 					      STATUS_ASSOCIATING |
4613 					      STATUS_ASSOCIATED | STATUS_AUTH);
4614 					if (priv->assoc_network
4615 					    && (priv->assoc_network->
4616 						capability &
4617 						WLAN_CAPABILITY_IBSS))
4618 						ipw_remove_current_network
4619 						    (priv);
4620 
4621 					schedule_work(&priv->link_down);
4622 
4623 					break;
4624 				}
4625 
4626 			case CMAS_RX_ASSOC_RESP:
4627 				break;
4628 
4629 			default:
4630 				IPW_ERROR("assoc: unknown (%d)\n",
4631 					  assoc->state);
4632 				break;
4633 			}
4634 
4635 			break;
4636 		}
4637 
4638 	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4639 			struct notif_authenticate *auth = &notif->u.auth;
4640 			switch (auth->state) {
4641 			case CMAS_AUTHENTICATED:
4642 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4643 					  "authenticated: '%*pE' %pM\n",
4644 					  priv->essid_len, priv->essid,
4645 					  priv->bssid);
4646 				priv->status |= STATUS_AUTH;
4647 				break;
4648 
4649 			case CMAS_INIT:
4650 				if (priv->status & STATUS_AUTH) {
4651 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4652 						  IPW_DL_ASSOC,
4653 						  "authentication failed (0x%04X): %s\n",
4654 						  le16_to_cpu(auth->status),
4655 						  ipw_get_status_code(le16_to_cpu
4656 								      (auth->
4657 								       status)));
4658 				}
4659 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4660 					  IPW_DL_ASSOC,
4661 					  "deauthenticated: '%*pE' %pM\n",
4662 					  priv->essid_len, priv->essid,
4663 					  priv->bssid);
4664 
4665 				priv->status &= ~(STATUS_ASSOCIATING |
4666 						  STATUS_AUTH |
4667 						  STATUS_ASSOCIATED);
4668 
4669 				schedule_work(&priv->link_down);
4670 				break;
4671 
4672 			case CMAS_TX_AUTH_SEQ_1:
4673 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4674 					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4675 				break;
4676 			case CMAS_RX_AUTH_SEQ_2:
4677 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4678 					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4679 				break;
4680 			case CMAS_AUTH_SEQ_1_PASS:
4681 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4682 					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4683 				break;
4684 			case CMAS_AUTH_SEQ_1_FAIL:
4685 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4686 					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4687 				break;
4688 			case CMAS_TX_AUTH_SEQ_3:
4689 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4690 					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4691 				break;
4692 			case CMAS_RX_AUTH_SEQ_4:
4693 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4694 					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4695 				break;
4696 			case CMAS_AUTH_SEQ_2_PASS:
4697 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4698 					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4699 				break;
4700 			case CMAS_AUTH_SEQ_2_FAIL:
4701 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4702 					  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4703 				break;
4704 			case CMAS_TX_ASSOC:
4705 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4706 					  IPW_DL_ASSOC, "TX_ASSOC\n");
4707 				break;
4708 			case CMAS_RX_ASSOC_RESP:
4709 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4710 					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4711 
4712 				break;
4713 			case CMAS_ASSOCIATED:
4714 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4715 					  IPW_DL_ASSOC, "ASSOCIATED\n");
4716 				break;
4717 			default:
4718 				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4719 						auth->state);
4720 				break;
4721 			}
4722 			break;
4723 		}
4724 
4725 	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4726 			struct notif_channel_result *x =
4727 			    &notif->u.channel_result;
4728 
4729 			if (size == sizeof(*x)) {
4730 				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4731 					       x->channel_num);
4732 			} else {
4733 				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4734 					       "(should be %zd)\n",
4735 					       size, sizeof(*x));
4736 			}
4737 			break;
4738 		}
4739 
4740 	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4741 			struct notif_scan_complete *x = &notif->u.scan_complete;
4742 			if (size == sizeof(*x)) {
4743 				IPW_DEBUG_SCAN
4744 				    ("Scan completed: type %d, %d channels, "
4745 				     "%d status\n", x->scan_type,
4746 				     x->num_channels, x->status);
4747 			} else {
4748 				IPW_ERROR("Scan completed of wrong size %d "
4749 					  "(should be %zd)\n",
4750 					  size, sizeof(*x));
4751 			}
4752 
4753 			priv->status &=
4754 			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4755 
4756 			wake_up_interruptible(&priv->wait_state);
4757 			cancel_delayed_work(&priv->scan_check);
4758 
4759 			if (priv->status & STATUS_EXIT_PENDING)
4760 				break;
4761 
4762 			priv->ieee->scans++;
4763 
4764 #ifdef CONFIG_IPW2200_MONITOR
4765 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4766 				priv->status |= STATUS_SCAN_FORCED;
4767 				schedule_delayed_work(&priv->request_scan, 0);
4768 				break;
4769 			}
4770 			priv->status &= ~STATUS_SCAN_FORCED;
4771 #endif				/* CONFIG_IPW2200_MONITOR */
4772 
4773 			/* Do queued direct scans first */
4774 			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4775 				schedule_delayed_work(&priv->request_direct_scan, 0);
4776 
4777 			if (!(priv->status & (STATUS_ASSOCIATED |
4778 					      STATUS_ASSOCIATING |
4779 					      STATUS_ROAMING |
4780 					      STATUS_DISASSOCIATING)))
4781 				schedule_work(&priv->associate);
4782 			else if (priv->status & STATUS_ROAMING) {
4783 				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4784 					/* If a scan completed and we are in roam mode, then
4785 					 * the scan that completed was the one requested as a
4786 					 * result of entering roam... so, schedule the
4787 					 * roam work */
4788 					schedule_work(&priv->roam);
4789 				else
4790 					/* Don't schedule if we aborted the scan */
4791 					priv->status &= ~STATUS_ROAMING;
4792 			} else if (priv->status & STATUS_SCAN_PENDING)
4793 				schedule_delayed_work(&priv->request_scan, 0);
4794 			else if (priv->config & CFG_BACKGROUND_SCAN
4795 				 && priv->status & STATUS_ASSOCIATED)
4796 				schedule_delayed_work(&priv->request_scan,
4797 						      round_jiffies_relative(HZ));
4798 
4799 			/* Send an empty event to user space.
4800 			 * We don't send the received data on the event because
4801 			 * it would require us to do complex transcoding, and
4802 			 * we want to minimise the work done in the irq handler.
4803 			 * Use a request to extract the data.
4804 			 * Also, we generate this event for any scan, regardless
4805 			 * of how the scan was initiated. User space can just
4806 			 * sync on periodic scan to get fresh data...
4807 			 * Jean II */
4808 			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4809 				handle_scan_event(priv);
4810 			break;
4811 		}
4812 
4813 	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4814 			struct notif_frag_length *x = &notif->u.frag_len;
4815 
4816 			if (size == sizeof(*x))
4817 				IPW_ERROR("Frag length: %d\n",
4818 					  le16_to_cpu(x->frag_length));
4819 			else
4820 				IPW_ERROR("Frag length of wrong size %d "
4821 					  "(should be %zd)\n",
4822 					  size, sizeof(*x));
4823 			break;
4824 		}
4825 
4826 	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4827 			struct notif_link_deterioration *x =
4828 			    &notif->u.link_deterioration;
4829 
4830 			if (size == sizeof(*x)) {
4831 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4832 					"link deterioration: type %d, cnt %d\n",
4833 					x->silence_notification_type,
4834 					x->silence_count);
4835 				memcpy(&priv->last_link_deterioration, x,
4836 				       sizeof(*x));
4837 			} else {
4838 				IPW_ERROR("Link Deterioration of wrong size %d "
4839 					  "(should be %zd)\n",
4840 					  size, sizeof(*x));
4841 			}
4842 			break;
4843 		}
4844 
4845 	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4846 			IPW_ERROR("Dino config\n");
4847 			if (priv->hcmd
4848 			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4849 				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4850 
4851 			break;
4852 		}
4853 
4854 	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4855 			struct notif_beacon_state *x = &notif->u.beacon_state;
4856 			if (size != sizeof(*x)) {
4857 				IPW_ERROR
4858 				    ("Beacon state of wrong size %d (should "
4859 				     "be %zd)\n", size, sizeof(*x));
4860 				break;
4861 			}
4862 
4863 			if (le32_to_cpu(x->state) ==
4864 			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4865 				ipw_handle_missed_beacon(priv,
4866 							 le32_to_cpu(x->
4867 								     number));
4868 
4869 			break;
4870 		}
4871 
4872 	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4873 			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4874 			if (size == sizeof(*x)) {
4875 				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4876 					  "0x%02x station %d\n",
4877 					  x->key_state, x->security_type,
4878 					  x->station_index);
4879 				break;
4880 			}
4881 
4882 			IPW_ERROR
4883 			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4884 			     size, sizeof(*x));
4885 			break;
4886 		}
4887 
4888 	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4889 			struct notif_calibration *x = &notif->u.calibration;
4890 
4891 			if (size == sizeof(*x)) {
4892 				memcpy(&priv->calib, x, sizeof(*x));
4893 				IPW_DEBUG_INFO("TODO: Calibration\n");
4894 				break;
4895 			}
4896 
4897 			IPW_ERROR
4898 			    ("Calibration of wrong size %d (should be %zd)\n",
4899 			     size, sizeof(*x));
4900 			break;
4901 		}
4902 
4903 	case HOST_NOTIFICATION_NOISE_STATS:{
4904 			if (size == sizeof(u32)) {
4905 				priv->exp_avg_noise =
4906 				    exponential_average(priv->exp_avg_noise,
4907 				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4908 				    DEPTH_NOISE);
4909 				break;
4910 			}
4911 
4912 			IPW_ERROR
4913 			    ("Noise stat is wrong size %d (should be %zd)\n",
4914 			     size, sizeof(u32));
4915 			break;
4916 		}
4917 
4918 	default:
4919 		IPW_DEBUG_NOTIF("Unknown notification: "
4920 				"subtype=%d,flags=0x%2x,size=%d\n",
4921 				notif->subtype, notif->flags, size);
4922 	}
4923 }
4924 
4925 /*
4926  * Destroys all DMA structures and initialises them again
4927  *
4928  * @param priv
4929  * @return error code
4930  */
4931 static int ipw_queue_reset(struct ipw_priv *priv)
4932 {
4933 	int rc = 0;
4934 	/* @todo customize queue sizes */
4935 	int nTx = 64, nTxCmd = 8;
4936 	ipw_tx_queue_free(priv);
4937 	/* Tx CMD queue */
4938 	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4939 			       IPW_TX_CMD_QUEUE_READ_INDEX,
4940 			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4941 			       IPW_TX_CMD_QUEUE_BD_BASE,
4942 			       IPW_TX_CMD_QUEUE_BD_SIZE);
4943 	if (rc) {
4944 		IPW_ERROR("Tx Cmd queue init failed\n");
4945 		goto error;
4946 	}
4947 	/* Tx queue(s) */
4948 	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4949 			       IPW_TX_QUEUE_0_READ_INDEX,
4950 			       IPW_TX_QUEUE_0_WRITE_INDEX,
4951 			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4952 	if (rc) {
4953 		IPW_ERROR("Tx 0 queue init failed\n");
4954 		goto error;
4955 	}
4956 	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4957 			       IPW_TX_QUEUE_1_READ_INDEX,
4958 			       IPW_TX_QUEUE_1_WRITE_INDEX,
4959 			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4960 	if (rc) {
4961 		IPW_ERROR("Tx 1 queue init failed\n");
4962 		goto error;
4963 	}
4964 	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4965 			       IPW_TX_QUEUE_2_READ_INDEX,
4966 			       IPW_TX_QUEUE_2_WRITE_INDEX,
4967 			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4968 	if (rc) {
4969 		IPW_ERROR("Tx 2 queue init failed\n");
4970 		goto error;
4971 	}
4972 	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4973 			       IPW_TX_QUEUE_3_READ_INDEX,
4974 			       IPW_TX_QUEUE_3_WRITE_INDEX,
4975 			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4976 	if (rc) {
4977 		IPW_ERROR("Tx 3 queue init failed\n");
4978 		goto error;
4979 	}
4980 	/* statistics */
4981 	priv->rx_bufs_min = 0;
4982 	priv->rx_pend_max = 0;
4983 	return rc;
4984 
4985       error:
4986 	ipw_tx_queue_free(priv);
4987 	return rc;
4988 }
4989 
4990 /*
4991  * Reclaim Tx queue entries that are no longer used by the NIC.
4992  *
4993  * When the firmware advances the 'R' index, all entries between the old and
4994  * new 'R' index need to be reclaimed. As a result, some free space becomes
4995  * available. If there is enough free space (> low mark), wake the Tx queue.
4996  *
4997  * @note Need to protect against garbage in 'R' index
4998  * @param priv
4999  * @param txq
5000  * @param qindex
5001  * @return Number of used entries remaining in the queue
5002  */
5003 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5004 				struct clx2_tx_queue *txq, int qindex)
5005 {
5006 	u32 hw_tail;
5007 	int used;
5008 	struct clx2_queue *q = &txq->q;
5009 
5010 	hw_tail = ipw_read32(priv, q->reg_r);
5011 	if (hw_tail >= q->n_bd) {
5012 		IPW_ERROR
5013 		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5014 		     hw_tail, q->n_bd);
5015 		goto done;
5016 	}
5017 	for (; q->last_used != hw_tail;
5018 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5019 		ipw_queue_tx_free_tfd(priv, txq);
5020 		priv->tx_packets++;
5021 	}
5022       done:
5023 	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5024 	    (qindex >= 0))
5025 		netif_wake_queue(priv->net_dev);
5026 	used = q->first_empty - q->last_used;
5027 	if (used < 0)
5028 		used += q->n_bd;
5029 
5030 	return used;
5031 }
5032 
5033 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
5034 			     int len, int sync)
5035 {
5036 	struct clx2_tx_queue *txq = &priv->txq_cmd;
5037 	struct clx2_queue *q = &txq->q;
5038 	struct tfd_frame *tfd;
5039 
5040 	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5041 		IPW_ERROR("No space for Tx\n");
5042 		return -EBUSY;
5043 	}
5044 
5045 	tfd = &txq->bd[q->first_empty];
5046 	txq->txb[q->first_empty] = NULL;
5047 
5048 	memset(tfd, 0, sizeof(*tfd));
5049 	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5050 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5051 	priv->hcmd_seq++;
5052 	tfd->u.cmd.index = hcmd;
5053 	tfd->u.cmd.length = len;
5054 	memcpy(tfd->u.cmd.payload, buf, len);
5055 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5056 	ipw_write32(priv, q->reg_w, q->first_empty);
5057 	_ipw_read32(priv, 0x90);
5058 
5059 	return 0;
5060 }
5061 
5062 /*
5063  * Rx theory of operation
5064  *
5065  * The host allocates 32 DMA target addresses and passes the host address
5066  * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5067  * 0 to 31
5068  *
5069  * Rx Queue Indexes
5070  * The host/firmware share two index registers for managing the Rx buffers.
5071  *
5072  * The READ index maps to the first position that the firmware may be writing
5073  * to -- the driver can read up to (but not including) this position and get
5074  * good data.
5075  * The READ index is managed by the firmware once the card is enabled.
5076  *
5077  * The WRITE index maps to the last position the driver has read from -- the
5078  * position preceding WRITE is the last slot in which the firmware can place a packet.
5079  *
5080  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5081  * WRITE = READ.
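      * For example, READ = 10 and WRITE = 9 (WRITE == READ - 1) describes an
      * empty queue, while READ == WRITE == 10 describes a full one.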
5082  *
5083  * During initialization the host sets up the READ queue position to the first
5084  * INDEX position, and WRITE to the last (READ - 1 wrapped)
5085  *
5086  * When the firmware places a packet in a buffer it will advance the READ index
5087  * and fire the RX interrupt.  The driver can then query the READ index and
5088  * process as many packets as possible, moving the WRITE index forward as it
5089  * resets the Rx queue buffers with new memory.
5090  *
5091  * The management in the driver is as follows:
5092  * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5093  *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5094  *   to replenish the ipw->rxq->rx_free.
5095  * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5096  *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5097  *   'processed' and 'read' driver indexes as well)
5098  * + A received packet is processed and handed to the kernel network stack,
5099  *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5100  * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5101  *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5102  *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5103  *   were enough free buffers and RX_STALLED is set it is cleared.
5104  *
5105  *
5106  * Driver sequence:
5107  *
5108  * ipw_rx_queue_alloc()       Allocates rx_free
5109  * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5110  *                            ipw_rx_queue_restock
5111  * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5112  *                            queue, updates firmware pointers, and updates
5113  *                            the WRITE index.  If insufficient rx_free buffers
5114  *                            are available, schedules ipw_rx_queue_replenish
5115  *
5116  * -- enable interrupts --
5117  * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5118  *                            READ INDEX, detaching the SKB from the pool.
5119  *                            Moves the packet buffer from queue to rx_used.
5120  *                            Calls ipw_rx_queue_restock to refill any empty
5121  *                            slots.
5122  * ...
5123  *
5124  */
5125 
5126 /*
5127  * If there are slots in the RX queue that need to be restocked,
5128  * and we have free pre-allocated buffers, fill the ranks as much
5129  * as we can pulling from rx_free.
5130  *
5131  * This moves the 'write' index forward to catch up with 'processed', and
5132  * also updates the memory address in the firmware to reference the new
5133  * target buffer.
5134  */
5135 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5136 {
5137 	struct ipw_rx_queue *rxq = priv->rxq;
5138 	struct list_head *element;
5139 	struct ipw_rx_mem_buffer *rxb;
5140 	unsigned long flags;
5141 	int write;
5142 
5143 	spin_lock_irqsave(&rxq->lock, flags);
5144 	write = rxq->write;
5145 	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5146 		element = rxq->rx_free.next;
5147 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5148 		list_del(element);
5149 
5150 		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5151 			    rxb->dma_addr);
5152 		rxq->queue[rxq->write] = rxb;
5153 		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5154 		rxq->free_count--;
5155 	}
5156 	spin_unlock_irqrestore(&rxq->lock, flags);
5157 
5158 	/* If the pre-allocated buffer pool is dropping low, schedule to
5159 	 * refill it */
5160 	if (rxq->free_count <= RX_LOW_WATERMARK)
5161 		schedule_work(&priv->rx_replenish);
5162 
5163 	/* If we've added more space for the firmware to place data, tell it */
5164 	if (write != rxq->write)
5165 		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5166 }
5167 
5168 /*
5169  * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5170  * Also restock the Rx queue via ipw_rx_queue_restock.
5171  *
5172  * This is called as a scheduled work item (except during initialization)
5173  */
5174 static void ipw_rx_queue_replenish(void *data)
5175 {
5176 	struct ipw_priv *priv = data;
5177 	struct ipw_rx_queue *rxq = priv->rxq;
5178 	struct list_head *element;
5179 	struct ipw_rx_mem_buffer *rxb;
5180 	unsigned long flags;
5181 
5182 	spin_lock_irqsave(&rxq->lock, flags);
5183 	while (!list_empty(&rxq->rx_used)) {
5184 		element = rxq->rx_used.next;
5185 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5186 		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5187 		if (!rxb->skb) {
5188 			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5189 			       priv->net_dev->name);
5190 			/* We don't reschedule replenish work here -- we will
5191 			 * call the restock method and if it still needs
5192 			 * more buffers it will schedule replenish */
5193 			break;
5194 		}
5195 		list_del(element);
5196 
5197 		rxb->dma_addr =
5198 		    dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
5199 				   IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
5200 
5201 		list_add_tail(&rxb->list, &rxq->rx_free);
5202 		rxq->free_count++;
5203 	}
5204 	spin_unlock_irqrestore(&rxq->lock, flags);
5205 
5206 	ipw_rx_queue_restock(priv);
5207 }
5208 
5209 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5210 {
5211 	struct ipw_priv *priv =
5212 		container_of(work, struct ipw_priv, rx_replenish);
5213 	mutex_lock(&priv->mutex);
5214 	ipw_rx_queue_replenish(priv);
5215 	mutex_unlock(&priv->mutex);
5216 }
5217 
5218 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5219  * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5220  * This free routine walks the list of POOL entries and, if the SKB is
5221  * non-NULL, it is unmapped and freed.
5222  */
5223 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5224 {
5225 	int i;
5226 
5227 	if (!rxq)
5228 		return;
5229 
5230 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5231 		if (rxq->pool[i].skb != NULL) {
5232 			dma_unmap_single(&priv->pci_dev->dev,
5233 					 rxq->pool[i].dma_addr,
5234 					 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
5235 			dev_kfree_skb(rxq->pool[i].skb);
5236 		}
5237 	}
5238 
5239 	kfree(rxq);
5240 }
5241 
5242 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5243 {
5244 	struct ipw_rx_queue *rxq;
5245 	int i;
5246 
5247 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5248 	if (unlikely(!rxq)) {
5249 		IPW_ERROR("memory allocation failed\n");
5250 		return NULL;
5251 	}
5252 	spin_lock_init(&rxq->lock);
5253 	INIT_LIST_HEAD(&rxq->rx_free);
5254 	INIT_LIST_HEAD(&rxq->rx_used);
5255 
5256 	/* Fill the rx_used queue with _all_ of the Rx buffers */
5257 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5258 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5259 
5260 	/* Set us so that we have processed and used all buffers, but have
5261 	 * not restocked the Rx queue with fresh buffers */
5262 	rxq->read = rxq->write = 0;
5263 	rxq->free_count = 0;
5264 
5265 	return rxq;
5266 }
5267 
5268 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5269 {
5270 	rate &= ~LIBIPW_BASIC_RATE_MASK;
5271 	if (ieee_mode == IEEE_A) {
5272 		switch (rate) {
5273 		case LIBIPW_OFDM_RATE_6MB:
5274 			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5275 			    1 : 0;
5276 		case LIBIPW_OFDM_RATE_9MB:
5277 			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5278 			    1 : 0;
5279 		case LIBIPW_OFDM_RATE_12MB:
5280 			return priv->
5281 			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5282 		case LIBIPW_OFDM_RATE_18MB:
5283 			return priv->
5284 			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5285 		case LIBIPW_OFDM_RATE_24MB:
5286 			return priv->
5287 			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5288 		case LIBIPW_OFDM_RATE_36MB:
5289 			return priv->
5290 			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5291 		case LIBIPW_OFDM_RATE_48MB:
5292 			return priv->
5293 			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5294 		case LIBIPW_OFDM_RATE_54MB:
5295 			return priv->
5296 			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5297 		default:
5298 			return 0;
5299 		}
5300 	}
5301 
5302 	/* B and G mixed */
5303 	switch (rate) {
5304 	case LIBIPW_CCK_RATE_1MB:
5305 		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5306 	case LIBIPW_CCK_RATE_2MB:
5307 		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5308 	case LIBIPW_CCK_RATE_5MB:
5309 		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5310 	case LIBIPW_CCK_RATE_11MB:
5311 		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5312 	}
5313 
5314 	/* If we are limited to B modulations, bail at this point */
5315 	if (ieee_mode == IEEE_B)
5316 		return 0;
5317 
5318 	/* G */
5319 	switch (rate) {
5320 	case LIBIPW_OFDM_RATE_6MB:
5321 		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5322 	case LIBIPW_OFDM_RATE_9MB:
5323 		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5324 	case LIBIPW_OFDM_RATE_12MB:
5325 		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5326 	case LIBIPW_OFDM_RATE_18MB:
5327 		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5328 	case LIBIPW_OFDM_RATE_24MB:
5329 		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5330 	case LIBIPW_OFDM_RATE_36MB:
5331 		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5332 	case LIBIPW_OFDM_RATE_48MB:
5333 		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5334 	case LIBIPW_OFDM_RATE_54MB:
5335 		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5336 	}
5337 
5338 	return 0;
5339 }
5340 
5341 static int ipw_compatible_rates(struct ipw_priv *priv,
5342 				const struct libipw_network *network,
5343 				struct ipw_supported_rates *rates)
5344 {
5345 	int num_rates, i;
5346 
5347 	memset(rates, 0, sizeof(*rates));
5348 	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5349 	rates->num_rates = 0;
5350 	for (i = 0; i < num_rates; i++) {
5351 		if (!ipw_is_rate_in_mask(priv, network->mode,
5352 					 network->rates[i])) {
5353 
5354 			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5355 				IPW_DEBUG_SCAN("Adding masked mandatory "
5356 					       "rate %02X\n",
5357 					       network->rates[i]);
5358 				rates->supported_rates[rates->num_rates++] =
5359 				    network->rates[i];
5360 				continue;
5361 			}
5362 
5363 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5364 				       network->rates[i], priv->rates_mask);
5365 			continue;
5366 		}
5367 
5368 		rates->supported_rates[rates->num_rates++] = network->rates[i];
5369 	}
5370 
5371 	num_rates = min(network->rates_ex_len,
5372 			(u8) (IPW_MAX_RATES - num_rates));
5373 	for (i = 0; i < num_rates; i++) {
5374 		if (!ipw_is_rate_in_mask(priv, network->mode,
5375 					 network->rates_ex[i])) {
5376 			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5377 				IPW_DEBUG_SCAN("Adding masked mandatory "
5378 					       "rate %02X\n",
5379 					       network->rates_ex[i]);
5380 				rates->supported_rates[rates->num_rates++] =
5381 				    network->rates[i];
5382 				continue;
5383 			}
5384 
5385 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5386 				       network->rates_ex[i], priv->rates_mask);
5387 			continue;
5388 		}
5389 
5390 		rates->supported_rates[rates->num_rates++] =
5391 		    network->rates_ex[i];
5392 	}
5393 
5394 	return 1;
5395 }
5396 
5397 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5398 				  const struct ipw_supported_rates *src)
5399 {
5400 	u8 i;
5401 	for (i = 0; i < src->num_rates; i++)
5402 		dest->supported_rates[i] = src->supported_rates[i];
5403 	dest->num_rates = src->num_rates;
5404 }
5405 
5406 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5407  * mask should ever be used -- right now all callers to add the scan rates are
5408  * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5409 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5410 				   u8 modulation, u32 rate_mask)
5411 {
5412 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5413 	    LIBIPW_BASIC_RATE_MASK : 0;
5414 
5415 	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5416 		rates->supported_rates[rates->num_rates++] =
5417 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5418 
5419 	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5420 		rates->supported_rates[rates->num_rates++] =
5421 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5422 
5423 	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5424 		rates->supported_rates[rates->num_rates++] = basic_mask |
5425 		    LIBIPW_CCK_RATE_5MB;
5426 
5427 	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5428 		rates->supported_rates[rates->num_rates++] = basic_mask |
5429 		    LIBIPW_CCK_RATE_11MB;
5430 }
5431 
5432 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5433 				    u8 modulation, u32 rate_mask)
5434 {
5435 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5436 	    LIBIPW_BASIC_RATE_MASK : 0;
5437 
5438 	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5439 		rates->supported_rates[rates->num_rates++] = basic_mask |
5440 		    LIBIPW_OFDM_RATE_6MB;
5441 
5442 	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5443 		rates->supported_rates[rates->num_rates++] =
5444 		    LIBIPW_OFDM_RATE_9MB;
5445 
5446 	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5447 		rates->supported_rates[rates->num_rates++] = basic_mask |
5448 		    LIBIPW_OFDM_RATE_12MB;
5449 
5450 	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5451 		rates->supported_rates[rates->num_rates++] =
5452 		    LIBIPW_OFDM_RATE_18MB;
5453 
5454 	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5455 		rates->supported_rates[rates->num_rates++] = basic_mask |
5456 		    LIBIPW_OFDM_RATE_24MB;
5457 
5458 	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5459 		rates->supported_rates[rates->num_rates++] =
5460 		    LIBIPW_OFDM_RATE_36MB;
5461 
5462 	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5463 		rates->supported_rates[rates->num_rates++] =
5464 		    LIBIPW_OFDM_RATE_48MB;
5465 
5466 	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5467 		rates->supported_rates[rates->num_rates++] =
5468 		    LIBIPW_OFDM_RATE_54MB;
5469 }
5470 
5471 struct ipw_network_match {
5472 	struct libipw_network *network;
5473 	struct ipw_supported_rates rates;
5474 };
5475 
5476 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5477 				  struct ipw_network_match *match,
5478 				  struct libipw_network *network,
5479 				  int roaming)
5480 {
5481 	struct ipw_supported_rates rates;
5482 
5483 	/* Verify that this network's capability is compatible with the
5484 	 * current mode (AdHoc or Infrastructure) */
5485 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5486 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5487 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5488 				network->ssid_len, network->ssid,
5489 				network->bssid);
5490 		return 0;
5491 	}
5492 
5493 	if (unlikely(roaming)) {
5494 		/* If we are roaming, then check whether this is a valid
5495 		 * network to try to roam to */
5496 		if ((network->ssid_len != match->network->ssid_len) ||
5497 		    memcmp(network->ssid, match->network->ssid,
5498 			   network->ssid_len)) {
5499 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5500 					network->ssid_len, network->ssid,
5501 					network->bssid);
5502 			return 0;
5503 		}
5504 	} else {
5505 		/* If an ESSID has been configured then compare the broadcast
5506 		 * ESSID to ours */
5507 		if ((priv->config & CFG_STATIC_ESSID) &&
5508 		    ((network->ssid_len != priv->essid_len) ||
5509 		     memcmp(network->ssid, priv->essid,
5510 			    min(network->ssid_len, priv->essid_len)))) {
5511 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5512 					network->ssid_len, network->ssid,
5513 					network->bssid, priv->essid_len,
5514 					priv->essid);
5515 			return 0;
5516 		}
5517 	}
5518 
5519 	/* If the currently matched network is newer than this one, don't
5520 	 * bother testing everything else. */
5521 
5522 	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5523 		IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
5524 				match->network->ssid_len, match->network->ssid);
5525 		return 0;
5526 	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5527 		IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
5528 				match->network->ssid_len, match->network->ssid);
5529 		return 0;
5530 	}
5531 
5532 	/* Now go through and see if the requested network is valid... */
5533 	if (priv->ieee->scan_age != 0 &&
5534 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5535 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5536 				network->ssid_len, network->ssid,
5537 				network->bssid,
5538 				jiffies_to_msecs(jiffies -
5539 						 network->last_scanned));
5540 		return 0;
5541 	}
5542 
5543 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5544 	    (network->channel != priv->channel)) {
5545 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5546 				network->ssid_len, network->ssid,
5547 				network->bssid,
5548 				network->channel, priv->channel);
5549 		return 0;
5550 	}
5551 
5552 	/* Verify privacy compatibility */
5553 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5554 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5555 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5556 				network->ssid_len, network->ssid,
5557 				network->bssid,
5558 				priv->capability & CAP_PRIVACY_ON ? "on" :
5559 				"off",
5560 				network->capability &
5561 				WLAN_CAPABILITY_PRIVACY ? "on" :
5562 				"off");
5563 		return 0;
5564 	}
5565 
5566 	if (ether_addr_equal(network->bssid, priv->bssid)) {
5567 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
5568 				network->ssid_len, network->ssid,
5569 				network->bssid, priv->bssid);
5570 		return 0;
5571 	}
5572 
5573 	/* Filter out any incompatible freq / mode combinations */
5574 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5575 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5576 				network->ssid_len, network->ssid,
5577 				network->bssid);
5578 		return 0;
5579 	}
5580 
5581 	/* Ensure that the rates supported by the driver are compatible with
5582 	 * this AP, including verification of basic rates (mandatory) */
5583 	if (!ipw_compatible_rates(priv, network, &rates)) {
5584 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5585 				network->ssid_len, network->ssid,
5586 				network->bssid);
5587 		return 0;
5588 	}
5589 
5590 	if (rates.num_rates == 0) {
5591 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5592 				network->ssid_len, network->ssid,
5593 				network->bssid);
5594 		return 0;
5595 	}
5596 
5597 	/* TODO: Perform any further minimal comparative tests.  We do not
5598 	 * want to put too much policy logic here; intelligent scan selection
5599 	 * should occur within a generic IEEE 802.11 user space tool.  */
5600 
5601 	/* Set up 'new' AP to this network */
5602 	ipw_copy_rates(&match->rates, &rates);
5603 	match->network = network;
5604 	IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
5605 			network->ssid_len, network->ssid, network->bssid);
5606 
5607 	return 1;
5608 }
5609 
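/* Periodic IBSS merge pass: while associated in ad-hoc mode, walk the
 * scan results for another IBSS cell that matches our ESSID and looks
 * preferable (see ipw_find_adhoc_network).  If one is found, drop the
 * current network and disassociate so the association machinery can
 * join the better cell. */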
5610 static void ipw_merge_adhoc_network(struct work_struct *work)
5611 {
5612 	struct ipw_priv *priv =
5613 		container_of(work, struct ipw_priv, merge_networks);
5614 	struct libipw_network *network = NULL;
5615 	struct ipw_network_match match = {
5616 		.network = priv->assoc_network
5617 	};
5618 
5619 	if ((priv->status & STATUS_ASSOCIATED) &&
5620 	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5621 		/* First pass through ROAM process -- look for a better
5622 		 * network */
5623 		unsigned long flags;
5624 
5625 		spin_lock_irqsave(&priv->ieee->lock, flags);
5626 		list_for_each_entry(network, &priv->ieee->network_list, list) {
5627 			if (network != priv->assoc_network)
5628 				ipw_find_adhoc_network(priv, &match, network,
5629 						       1);
5630 		}
5631 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5632 
5633 		if (match.network == priv->assoc_network) {
5634 			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5635 					"merge to.\n");
5636 			return;
5637 		}
5638 
5639 		mutex_lock(&priv->mutex);
5640 		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5641 			IPW_DEBUG_MERGE("remove network %*pE\n",
5642 					priv->essid_len, priv->essid);
5643 			ipw_remove_current_network(priv);
5644 		}
5645 
5646 		ipw_disassociate(priv);
5647 		priv->assoc_network = match.network;
5648 		mutex_unlock(&priv->mutex);
5649 		return;
5650 	}
5651 }
5652 
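/* Filter applied to each scanned network while looking for something to
 * associate with.  Returns 1 and records the network in *match only if
 * it passes every compatibility check and has a stronger signal than the
 * currently matched network; otherwise returns 0. */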
5653 static int ipw_best_network(struct ipw_priv *priv,
5654 			    struct ipw_network_match *match,
5655 			    struct libipw_network *network, int roaming)
5656 {
5657 	struct ipw_supported_rates rates;
5658 
5659 	/* Verify that this network's capability is compatible with the
5660 	 * current mode (AdHoc or Infrastructure) */
5661 	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5662 	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5663 	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5664 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5665 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5666 				network->ssid_len, network->ssid,
5667 				network->bssid);
5668 		return 0;
5669 	}
5670 
5671 	if (unlikely(roaming)) {
5672 		/* If we are roaming, then check whether this is a valid
5673 		 * network to try to roam to */
5674 		if ((network->ssid_len != match->network->ssid_len) ||
5675 		    memcmp(network->ssid, match->network->ssid,
5676 			   network->ssid_len)) {
5677 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5678 					network->ssid_len, network->ssid,
5679 					network->bssid);
5680 			return 0;
5681 		}
5682 	} else {
5683 		/* If an ESSID has been configured then compare the broadcast
5684 		 * ESSID to ours */
5685 		if ((priv->config & CFG_STATIC_ESSID) &&
5686 		    ((network->ssid_len != priv->essid_len) ||
5687 		     memcmp(network->ssid, priv->essid,
5688 			    min(network->ssid_len, priv->essid_len)))) {
5689 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5690 					network->ssid_len, network->ssid,
5691 					network->bssid, priv->essid_len,
5692 					priv->essid);
5693 			return 0;
5694 		}
5695 	}
5696 
5697 	/* If the old network rate is better than this one, don't bother
5698 	 * testing everything else. */
5699 	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5700 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
5701 				network->ssid_len, network->ssid,
5702 				network->bssid, match->network->ssid_len,
5703 				match->network->ssid, match->network->bssid);
5704 		return 0;
5705 	}
5706 
5707 	/* If this network has already had an association attempt within the
5708 	 * last 3 seconds, do not try and associate again... */
5709 	if (network->last_associate &&
5710 	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5711 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
5712 				network->ssid_len, network->ssid,
5713 				network->bssid,
5714 				jiffies_to_msecs(jiffies -
5715 						 network->last_associate));
5716 		return 0;
5717 	}
5718 
5719 	/* Now go through and see if the requested network is valid... */
5720 	if (priv->ieee->scan_age != 0 &&
5721 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5722 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5723 				network->ssid_len, network->ssid,
5724 				network->bssid,
5725 				jiffies_to_msecs(jiffies -
5726 						 network->last_scanned));
5727 		return 0;
5728 	}
5729 
5730 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5731 	    (network->channel != priv->channel)) {
5732 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5733 				network->ssid_len, network->ssid,
5734 				network->bssid,
5735 				network->channel, priv->channel);
5736 		return 0;
5737 	}
5738 
5739 	/* Verify privacy compatibility */
5740 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5741 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5742 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5743 				network->ssid_len, network->ssid,
5744 				network->bssid,
5745 				priv->capability & CAP_PRIVACY_ON ? "on" :
5746 				"off",
5747 				network->capability &
5748 				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5749 		return 0;
5750 	}
5751 
5752 	if ((priv->config & CFG_STATIC_BSSID) &&
5753 	    !ether_addr_equal(network->bssid, priv->bssid)) {
5754 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
5755 				network->ssid_len, network->ssid,
5756 				network->bssid, priv->bssid);
5757 		return 0;
5758 	}
5759 
5760 	/* Filter out any incompatible freq / mode combinations */
5761 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5762 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5763 				network->ssid_len, network->ssid,
5764 				network->bssid);
5765 		return 0;
5766 	}
5767 
5768 	/* Filter out invalid channel in current GEO */
5769 	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5770 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
5771 				network->ssid_len, network->ssid,
5772 				network->bssid);
5773 		return 0;
5774 	}
5775 
5776 	/* Ensure that the rates supported by the driver are compatible with
5777 	 * this AP, including verification of basic rates (mandatory) */
5778 	if (!ipw_compatible_rates(priv, network, &rates)) {
5779 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5780 				network->ssid_len, network->ssid,
5781 				network->bssid);
5782 		return 0;
5783 	}
5784 
5785 	if (rates.num_rates == 0) {
5786 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5787 				network->ssid_len, network->ssid,
5788 				network->bssid);
5789 		return 0;
5790 	}
5791 
5792 	/* TODO: Perform any further minimal comparative tests.  We do not
5793 	 * want to put too much policy logic here; intelligent scan selection
5794 	 * should occur within a generic IEEE 802.11 user space tool.  */
5795 
5796 	/* Set up 'new' AP to this network */
5797 	ipw_copy_rates(&match->rates, &rates);
5798 	match->network = network;
5799 
5800 	IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
5801 			network->ssid_len, network->ssid, network->bssid);
5802 
5803 	return 1;
5804 }
5805 
5806 static void ipw_adhoc_create(struct ipw_priv *priv,
5807 			     struct libipw_network *network)
5808 {
5809 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5810 	int i;
5811 
5812 	/*
5813 	 * For the purposes of scanning, we can set our wireless mode
5814 	 * to trigger scans across combinations of bands, but when it
5815 	 * comes to creating a new ad-hoc network, we have to tell the FW
5816 	 * exactly which band to use.
5817 	 *
5818 	 * We also have the possibility of an invalid channel for the
5819 	 * chosen band.  Attempting to create a new ad-hoc network
5820 	 * with an invalid channel for wireless mode will trigger a
5821 	 * FW fatal error.
5822 	 *
5823 	 */
5824 	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5825 	case LIBIPW_52GHZ_BAND:
5826 		network->mode = IEEE_A;
5827 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5828 		BUG_ON(i == -1);
5829 		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5830 			IPW_WARNING("Overriding invalid channel\n");
5831 			priv->channel = geo->a[0].channel;
5832 		}
5833 		break;
5834 
5835 	case LIBIPW_24GHZ_BAND:
5836 		if (priv->ieee->mode & IEEE_G)
5837 			network->mode = IEEE_G;
5838 		else
5839 			network->mode = IEEE_B;
5840 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5841 		BUG_ON(i == -1);
5842 		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5843 			IPW_WARNING("Overriding invalid channel\n");
5844 			priv->channel = geo->bg[0].channel;
5845 		}
5846 		break;
5847 
5848 	default:
5849 		IPW_WARNING("Overriding invalid channel\n");
5850 		if (priv->ieee->mode & IEEE_A) {
5851 			network->mode = IEEE_A;
5852 			priv->channel = geo->a[0].channel;
5853 		} else if (priv->ieee->mode & IEEE_G) {
5854 			network->mode = IEEE_G;
5855 			priv->channel = geo->bg[0].channel;
5856 		} else {
5857 			network->mode = IEEE_B;
5858 			priv->channel = geo->bg[0].channel;
5859 		}
5860 		break;
5861 	}
5862 
5863 	network->channel = priv->channel;
5864 	priv->config |= CFG_ADHOC_PERSIST;
5865 	ipw_create_bssid(priv, network->bssid);
5866 	network->ssid_len = priv->essid_len;
5867 	memcpy(network->ssid, priv->essid, priv->essid_len);
5868 	memset(&network->stats, 0, sizeof(network->stats));
5869 	network->capability = WLAN_CAPABILITY_IBSS;
5870 	if (!(priv->config & CFG_PREAMBLE_LONG))
5871 		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5872 	if (priv->capability & CAP_PRIVACY_ON)
5873 		network->capability |= WLAN_CAPABILITY_PRIVACY;
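	/* Advertise our configured rates, splitting them between the
	 * Supported Rates element (at most MAX_RATES_LENGTH entries) and
	 * the Extended Supported Rates element for the overflow. */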
5874 	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5875 	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5876 	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5877 	memcpy(network->rates_ex,
5878 	       &priv->rates.supported_rates[network->rates_len],
5879 	       network->rates_ex_len);
5880 	network->last_scanned = 0;
5881 	network->flags = 0;
5882 	network->last_associate = 0;
5883 	network->time_stamp[0] = 0;
5884 	network->time_stamp[1] = 0;
5885 	network->beacon_interval = 100;	/* Default */
5886 	network->listen_interval = 10;	/* Default */
5887 	network->atim_window = 0;	/* Default */
5888 	network->wpa_ie_len = 0;
5889 	network->rsn_ie_len = 0;
5890 }
5891 
5892 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5893 {
5894 	struct ipw_tgi_tx_key key;
5895 
5896 	if (!(priv->ieee->sec.flags & (1 << index)))
5897 		return;
5898 
5899 	key.key_id = index;
5900 	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5901 	key.security_type = type;
5902 	key.station_index = 0;	/* always 0 for BSS */
5903 	key.flags = 0;
5904 	/* 0 for new key; previous value of counter (after fatal error) */
5905 	key.tx_counter[0] = cpu_to_le32(0);
5906 	key.tx_counter[1] = cpu_to_le32(0);
5907 
5908 	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5909 }
5910 
5911 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5912 {
5913 	struct ipw_wep_key key;
5914 	int i;
5915 
5916 	key.cmd_id = DINO_CMD_WEP_KEY;
5917 	key.seq_num = 0;
5918 
5919 	/* Note: AES keys cannot be set multiple times.
5920 	 * Only set them the first time. */
5921 	for (i = 0; i < 4; i++) {
5922 		key.key_index = i | type;
5923 		if (!(priv->ieee->sec.flags & (1 << i))) {
5924 			key.key_size = 0;
5925 			continue;
5926 		}
5927 
5928 		key.key_size = priv->ieee->sec.key_sizes[i];
5929 		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5930 
5931 		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5932 	}
5933 }
5934 
5935 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5936 {
5937 	if (priv->ieee->host_encrypt)
5938 		return;
5939 
5940 	switch (level) {
5941 	case SEC_LEVEL_3:
5942 		priv->sys_config.disable_unicast_decryption = 0;
5943 		priv->ieee->host_decrypt = 0;
5944 		break;
5945 	case SEC_LEVEL_2:
5946 		priv->sys_config.disable_unicast_decryption = 1;
5947 		priv->ieee->host_decrypt = 1;
5948 		break;
5949 	case SEC_LEVEL_1:
5950 		priv->sys_config.disable_unicast_decryption = 0;
5951 		priv->ieee->host_decrypt = 0;
5952 		break;
5953 	case SEC_LEVEL_0:
5954 		priv->sys_config.disable_unicast_decryption = 1;
5955 		break;
5956 	default:
5957 		break;
5958 	}
5959 }
5960 
5961 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5962 {
5963 	if (priv->ieee->host_encrypt)
5964 		return;
5965 
5966 	switch (level) {
5967 	case SEC_LEVEL_3:
5968 		priv->sys_config.disable_multicast_decryption = 0;
5969 		break;
5970 	case SEC_LEVEL_2:
5971 		priv->sys_config.disable_multicast_decryption = 1;
5972 		break;
5973 	case SEC_LEVEL_1:
5974 		priv->sys_config.disable_multicast_decryption = 0;
5975 		break;
5976 	case SEC_LEVEL_0:
5977 		priv->sys_config.disable_multicast_decryption = 1;
5978 		break;
5979 	default:
5980 		break;
5981 	}
5982 }
5983 
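/* Push the current software security state down to the firmware.  The
 * command used depends on the negotiated security level: CCMP and TKIP
 * transmit keys go through IPW_CMD_TGI_TX_KEY, while WEP (and CCMP group)
 * keys are loaded through the DINO WEP key command. */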
5984 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5985 {
5986 	switch (priv->ieee->sec.level) {
5987 	case SEC_LEVEL_3:
5988 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5989 			ipw_send_tgi_tx_key(priv,
5990 					    DCT_FLAG_EXT_SECURITY_CCM,
5991 					    priv->ieee->sec.active_key);
5992 
5993 		if (!priv->ieee->host_mc_decrypt)
5994 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5995 		break;
5996 	case SEC_LEVEL_2:
5997 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5998 			ipw_send_tgi_tx_key(priv,
5999 					    DCT_FLAG_EXT_SECURITY_TKIP,
6000 					    priv->ieee->sec.active_key);
6001 		break;
6002 	case SEC_LEVEL_1:
6003 		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6004 		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6005 		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6006 		break;
6007 	case SEC_LEVEL_0:
6008 	default:
6009 		break;
6010 	}
6011 }
6012 
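/* Ad-hoc beacon watchdog: count missed beacons and disassociate once
 * disassociate_threshold is exceeded (unless CFG_ADHOC_PERSIST is set);
 * otherwise re-arm the check for roughly one beacon interval from now. */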
6013 static void ipw_adhoc_check(void *data)
6014 {
6015 	struct ipw_priv *priv = data;
6016 
6017 	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6018 	    !(priv->config & CFG_ADHOC_PERSIST)) {
6019 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6020 			  IPW_DL_STATE | IPW_DL_ASSOC,
6021 			  "Missed beacon: %d - disassociate\n",
6022 			  priv->missed_adhoc_beacons);
6023 		ipw_remove_current_network(priv);
6024 		ipw_disassociate(priv);
6025 		return;
6026 	}
6027 
6028 	schedule_delayed_work(&priv->adhoc_check,
6029 			      le16_to_cpu(priv->assoc_request.beacon_interval));
6030 }
6031 
6032 static void ipw_bg_adhoc_check(struct work_struct *work)
6033 {
6034 	struct ipw_priv *priv =
6035 		container_of(work, struct ipw_priv, adhoc_check.work);
6036 	mutex_lock(&priv->mutex);
6037 	ipw_adhoc_check(priv);
6038 	mutex_unlock(&priv->mutex);
6039 }
6040 
6041 static void ipw_debug_config(struct ipw_priv *priv)
6042 {
6043 	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6044 		       "[CFG 0x%08X]\n", priv->config);
6045 	if (priv->config & CFG_STATIC_CHANNEL)
6046 		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6047 	else
6048 		IPW_DEBUG_INFO("Channel unlocked.\n");
6049 	if (priv->config & CFG_STATIC_ESSID)
6050 		IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6051 			       priv->essid_len, priv->essid);
6052 	else
6053 		IPW_DEBUG_INFO("ESSID unlocked.\n");
6054 	if (priv->config & CFG_STATIC_BSSID)
6055 		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6056 	else
6057 		IPW_DEBUG_INFO("BSSID unlocked.\n");
6058 	if (priv->capability & CAP_PRIVACY_ON)
6059 		IPW_DEBUG_INFO("PRIVACY on\n");
6060 	else
6061 		IPW_DEBUG_INFO("PRIVACY off\n");
6062 	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6063 }
6064 
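/* Translate rates_mask into the band-specific fixed-rate bitmap the
 * firmware expects and store it at the address read from
 * IPW_MEM_FIXED_OVERRIDE.  Invalid combinations (e.g. CCK bits while
 * in the A band) clear the override. */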
6065 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6066 {
6067 	/* TODO: Verify that this works... */
6068 	struct ipw_fixed_rate fr;
6069 	u32 reg;
6070 	u16 mask = 0;
6071 	u16 new_tx_rates = priv->rates_mask;
6072 
6073 	/* Identify 'current FW band' and match it with the fixed
6074 	 * Tx rates */
6075 
6076 	switch (priv->ieee->freq_band) {
6077 	case LIBIPW_52GHZ_BAND:	/* A only */
6078 		/* IEEE_A */
6079 		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6080 			/* Invalid fixed rate mask */
6081 			IPW_DEBUG_WX
6082 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6083 			new_tx_rates = 0;
6084 			break;
6085 		}
6086 
6087 		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6088 		break;
6089 
6090 	default:		/* 2.4Ghz or Mixed */
6091 		/* IEEE_B */
6092 		if (mode == IEEE_B) {
6093 			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6094 				/* Invalid fixed rate mask */
6095 				IPW_DEBUG_WX
6096 				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6097 				new_tx_rates = 0;
6098 			}
6099 			break;
6100 		}
6101 
6102 		/* IEEE_G */
6103 		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6104 				    LIBIPW_OFDM_RATES_MASK)) {
6105 			/* Invalid fixed rate mask */
6106 			IPW_DEBUG_WX
6107 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6108 			new_tx_rates = 0;
6109 			break;
6110 		}
6111 
6112 		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6113 			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6114 			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6115 		}
6116 
6117 		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6118 			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6119 			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6120 		}
6121 
6122 		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6123 			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6124 			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6125 		}
6126 
6127 		new_tx_rates |= mask;
6128 		break;
6129 	}
6130 
6131 	fr.tx_rates = cpu_to_le16(new_tx_rates);
6132 
6133 	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6134 	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6135 }
6136 
6137 static void ipw_abort_scan(struct ipw_priv *priv)
6138 {
6139 	int err;
6140 
6141 	if (priv->status & STATUS_SCAN_ABORTING) {
6142 		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6143 		return;
6144 	}
6145 	priv->status |= STATUS_SCAN_ABORTING;
6146 
6147 	err = ipw_send_scan_abort(priv);
6148 	if (err)
6149 		IPW_DEBUG_HC("Request to abort scan failed.\n");
6150 }
6151 
6152 static void ipw_add_scan_channels(struct ipw_priv *priv,
6153 				  struct ipw_scan_request_ext *scan,
6154 				  int scan_type)
6155 {
6156 	int channel_index = 0;
6157 	const struct libipw_geo *geo;
6158 	int i;
6159 
6160 	geo = libipw_get_geo(priv->ieee);
6161 
6162 	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6163 		int start = channel_index;
6164 		for (i = 0; i < geo->a_channels; i++) {
6165 			if ((priv->status & STATUS_ASSOCIATED) &&
6166 			    geo->a[i].channel == priv->channel)
6167 				continue;
6168 			channel_index++;
6169 			scan->channels_list[channel_index] = geo->a[i].channel;
6170 			ipw_set_scan_type(scan, channel_index,
6171 					  geo->a[i].flags &
6172 					  LIBIPW_CH_PASSIVE_ONLY ?
6173 					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6174 					  scan_type);
6175 		}
6176 
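		/* Back-fill the slot reserved at 'start' with a band header
		 * byte: the band/mode tag in the top bits and the number of
		 * channels that follow in the low bits. */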
6177 		if (start != channel_index) {
6178 			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6179 			    (channel_index - start);
6180 			channel_index++;
6181 		}
6182 	}
6183 
6184 	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6185 		int start = channel_index;
6186 		if (priv->config & CFG_SPEED_SCAN) {
6187 			int index;
6188 			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6189 				/* nop out the list */
6190 				[0] = 0
6191 			};
6192 
6193 			u8 channel;
6194 			while (channel_index < IPW_SCAN_CHANNELS - 1) {
6195 				channel =
6196 				    priv->speed_scan[priv->speed_scan_pos];
6197 				if (channel == 0) {
6198 					priv->speed_scan_pos = 0;
6199 					channel = priv->speed_scan[0];
6200 				}
6201 				if ((priv->status & STATUS_ASSOCIATED) &&
6202 				    channel == priv->channel) {
6203 					priv->speed_scan_pos++;
6204 					continue;
6205 				}
6206 
6207 				/* If this channel has already been
6208 				 * added to the scan, break from the loop;
6209 				 * it will be the first channel
6210 				 * in the next scan.
6211 				 */
6212 				if (channels[channel - 1] != 0)
6213 					break;
6214 
6215 				channels[channel - 1] = 1;
6216 				priv->speed_scan_pos++;
6217 				channel_index++;
6218 				scan->channels_list[channel_index] = channel;
6219 				index =
6220 				    libipw_channel_to_index(priv->ieee, channel);
6221 				ipw_set_scan_type(scan, channel_index,
6222 						  geo->bg[index].flags &
6223 						  LIBIPW_CH_PASSIVE_ONLY ?
6225 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6226 						  : scan_type);
6227 			}
6228 		} else {
6229 			for (i = 0; i < geo->bg_channels; i++) {
6230 				if ((priv->status & STATUS_ASSOCIATED) &&
6231 				    geo->bg[i].channel == priv->channel)
6232 					continue;
6233 				channel_index++;
6234 				scan->channels_list[channel_index] =
6235 				    geo->bg[i].channel;
6236 				ipw_set_scan_type(scan, channel_index,
6237 						  geo->bg[i].flags &
6238 						  LIBIPW_CH_PASSIVE_ONLY ?
6240 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6241 						  : scan_type);
6242 			}
6243 		}
6244 
6245 		if (start != channel_index) {
6246 			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6247 			    (channel_index - start);
6248 		}
6249 	}
6250 }
6251 
6252 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6253 {
6254 	/* staying on passive channels longer than the DTIM interval during a
6255 	 * scan, while associated, causes the firmware to cancel the scan
6256 	 * without notification. Hence, don't stay on passive channels longer
6257 	 * than the beacon interval.
6258 	 */
6259 	if (priv->status & STATUS_ASSOCIATED
6260 	    && priv->assoc_network->beacon_interval > 10)
6261 		return priv->assoc_network->beacon_interval - 10;
6262 	else
6263 		return 120;
6264 }
6265 
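/* Build and send a scan request.  'type' selects active vs. passive
 * scanning and 'direct' requests a directed (SSID-specific) scan.  If a
 * scan cannot be started right now (already scanning, abort in progress,
 * RF kill), the request is remembered via STATUS_SCAN_PENDING or
 * STATUS_DIRECT_SCAN_PENDING and reissued later. */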
6266 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6267 {
6268 	struct ipw_scan_request_ext scan;
6269 	int err = 0, scan_type;
6270 
6271 	if (!(priv->status & STATUS_INIT) ||
6272 	    (priv->status & STATUS_EXIT_PENDING))
6273 		return 0;
6274 
6275 	mutex_lock(&priv->mutex);
6276 
6277 	if (direct && (priv->direct_scan_ssid_len == 0)) {
6278 		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6279 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6280 		goto done;
6281 	}
6282 
6283 	if (priv->status & STATUS_SCANNING) {
6284 		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6285 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6286 					STATUS_SCAN_PENDING;
6287 		goto done;
6288 	}
6289 
6290 	if (!(priv->status & STATUS_SCAN_FORCED) &&
6291 	    priv->status & STATUS_SCAN_ABORTING) {
6292 		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6293 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6294 					STATUS_SCAN_PENDING;
6295 		goto done;
6296 	}
6297 
6298 	if (priv->status & STATUS_RF_KILL_MASK) {
6299 		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6300 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6301 					STATUS_SCAN_PENDING;
6302 		goto done;
6303 	}
6304 
6305 	memset(&scan, 0, sizeof(scan));
6306 	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6307 
6308 	if (type == IW_SCAN_TYPE_PASSIVE) {
6309 		IPW_DEBUG_WX("use passive scanning\n");
6310 		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6311 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6312 			cpu_to_le16(ipw_passive_dwell_time(priv));
6313 		ipw_add_scan_channels(priv, &scan, scan_type);
6314 		goto send_request;
6315 	}
6316 
6317 	/* Use active scan by default. */
6318 	if (priv->config & CFG_SPEED_SCAN)
6319 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6320 			cpu_to_le16(30);
6321 	else
6322 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6323 			cpu_to_le16(20);
6324 
6325 	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6326 		cpu_to_le16(20);
6327 
6328 	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6329 		cpu_to_le16(ipw_passive_dwell_time(priv));
6330 	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6331 
6332 #ifdef CONFIG_IPW2200_MONITOR
6333 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6334 		u8 channel;
6335 		u8 band = 0;
6336 
6337 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6338 		case LIBIPW_52GHZ_BAND:
6339 			band = (u8) (IPW_A_MODE << 6) | 1;
6340 			channel = priv->channel;
6341 			break;
6342 
6343 		case LIBIPW_24GHZ_BAND:
6344 			band = (u8) (IPW_B_MODE << 6) | 1;
6345 			channel = priv->channel;
6346 			break;
6347 
6348 		default:
6349 			band = (u8) (IPW_B_MODE << 6) | 1;
6350 			channel = 9;
6351 			break;
6352 		}
6353 
6354 		scan.channels_list[0] = band;
6355 		scan.channels_list[1] = channel;
6356 		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6357 
6358 		/* NOTE:  The card will sit on this channel for this time
6359 		 * period.  Scan aborts are timing sensitive and frequently
6360 		 * result in firmware restarts.  As such, it is best to
6361 		 * set a small dwell_time here and just keep re-issuing
6362 		 * scans.  Otherwise fast channel hopping will not actually
6363 		 * hop channels.
6364 		 *
6365 		 * TODO: Move SPEED SCAN support to all modes and bands */
6366 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6367 			cpu_to_le16(2000);
6368 	} else {
6369 #endif				/* CONFIG_IPW2200_MONITOR */
6370 		/* Honor direct scans first, otherwise if we are roaming make
6371 		 * this a direct scan for the current network.  Finally,
6372 		 * ensure that every other scan is a fast channel hop scan */
6373 		if (direct) {
6374 			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6375 			                    priv->direct_scan_ssid_len);
6376 			if (err) {
6377 				IPW_DEBUG_HC("Attempt to send SSID command "
6378 					     "failed.\n");
6379 				goto done;
6380 			}
6381 
6382 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6383 		} else if ((priv->status & STATUS_ROAMING)
6384 			   || (!(priv->status & STATUS_ASSOCIATED)
6385 			       && (priv->config & CFG_STATIC_ESSID)
6386 			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6387 			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6388 			if (err) {
6389 				IPW_DEBUG_HC("Attempt to send SSID command "
6390 					     "failed.\n");
6391 				goto done;
6392 			}
6393 
6394 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6395 		} else
6396 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6397 
6398 		ipw_add_scan_channels(priv, &scan, scan_type);
6399 #ifdef CONFIG_IPW2200_MONITOR
6400 	}
6401 #endif
6402 
6403 send_request:
6404 	err = ipw_send_scan_request_ext(priv, &scan);
6405 	if (err) {
6406 		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6407 		goto done;
6408 	}
6409 
6410 	priv->status |= STATUS_SCANNING;
6411 	if (direct) {
6412 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6413 		priv->direct_scan_ssid_len = 0;
6414 	} else
6415 		priv->status &= ~STATUS_SCAN_PENDING;
6416 
6417 	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6418 done:
6419 	mutex_unlock(&priv->mutex);
6420 	return err;
6421 }
6422 
6423 static void ipw_request_passive_scan(struct work_struct *work)
6424 {
6425 	struct ipw_priv *priv =
6426 		container_of(work, struct ipw_priv, request_passive_scan.work);
6427 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6428 }
6429 
6430 static void ipw_request_scan(struct work_struct *work)
6431 {
6432 	struct ipw_priv *priv =
6433 		container_of(work, struct ipw_priv, request_scan.work);
6434 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6435 }
6436 
6437 static void ipw_request_direct_scan(struct work_struct *work)
6438 {
6439 	struct ipw_priv *priv =
6440 		container_of(work, struct ipw_priv, request_direct_scan.work);
6441 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6442 }
6443 
6444 static void ipw_bg_abort_scan(struct work_struct *work)
6445 {
6446 	struct ipw_priv *priv =
6447 		container_of(work, struct ipw_priv, abort_scan);
6448 	mutex_lock(&priv->mutex);
6449 	ipw_abort_scan(priv);
6450 	mutex_unlock(&priv->mutex);
6451 }
6452 
6453 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6454 {
6455 	/* This is called when wpa_supplicant loads and closes the driver
6456 	 * interface. */
6457 	priv->ieee->wpa_enabled = value;
6458 	return 0;
6459 }
6460 
6461 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6462 {
6463 	struct libipw_device *ieee = priv->ieee;
6464 	struct libipw_security sec = {
6465 		.flags = SEC_AUTH_MODE,
6466 	};
6467 	int ret = 0;
6468 
6469 	if (value & IW_AUTH_ALG_SHARED_KEY) {
6470 		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6471 		ieee->open_wep = 0;
6472 	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6473 		sec.auth_mode = WLAN_AUTH_OPEN;
6474 		ieee->open_wep = 1;
6475 	} else if (value & IW_AUTH_ALG_LEAP) {
6476 		sec.auth_mode = WLAN_AUTH_LEAP;
6477 		ieee->open_wep = 1;
6478 	} else
6479 		return -EINVAL;
6480 
6481 	if (ieee->set_security)
6482 		ieee->set_security(ieee->dev, &sec);
6483 	else
6484 		ret = -EOPNOTSUPP;
6485 
6486 	return ret;
6487 }
6488 
6489 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6490 				int wpa_ie_len)
6491 {
6492 	/* make sure WPA is enabled */
6493 	ipw_wpa_enable(priv, 1);
6494 }
6495 
6496 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6497 			    char *capabilities, int length)
6498 {
6499 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6500 
6501 	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6502 				capabilities);
6503 }
6504 
6505 /*
6506  * WE-18 support
6507  */
6508 
6509 /* SIOCSIWGENIE */
6510 static int ipw_wx_set_genie(struct net_device *dev,
6511 			    struct iw_request_info *info,
6512 			    union iwreq_data *wrqu, char *extra)
6513 {
6514 	struct ipw_priv *priv = libipw_priv(dev);
6515 	struct libipw_device *ieee = priv->ieee;
6516 	u8 *buf;
6517 	int err = 0;
6518 
6519 	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6520 	    (wrqu->data.length && extra == NULL))
6521 		return -EINVAL;
6522 
6523 	if (wrqu->data.length) {
6524 		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6525 		if (buf == NULL) {
6526 			err = -ENOMEM;
6527 			goto out;
6528 		}
6529 
6530 		kfree(ieee->wpa_ie);
6531 		ieee->wpa_ie = buf;
6532 		ieee->wpa_ie_len = wrqu->data.length;
6533 	} else {
6534 		kfree(ieee->wpa_ie);
6535 		ieee->wpa_ie = NULL;
6536 		ieee->wpa_ie_len = 0;
6537 	}
6538 
6539 	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6540       out:
6541 	return err;
6542 }
6543 
6544 /* SIOCGIWGENIE */
6545 static int ipw_wx_get_genie(struct net_device *dev,
6546 			    struct iw_request_info *info,
6547 			    union iwreq_data *wrqu, char *extra)
6548 {
6549 	struct ipw_priv *priv = libipw_priv(dev);
6550 	struct libipw_device *ieee = priv->ieee;
6551 	int err = 0;
6552 
6553 	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6554 		wrqu->data.length = 0;
6555 		goto out;
6556 	}
6557 
6558 	if (wrqu->data.length < ieee->wpa_ie_len) {
6559 		err = -E2BIG;
6560 		goto out;
6561 	}
6562 
6563 	wrqu->data.length = ieee->wpa_ie_len;
6564 	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6565 
6566       out:
6567 	return err;
6568 }
6569 
6570 static int wext_cipher2level(int cipher)
6571 {
6572 	switch (cipher) {
6573 	case IW_AUTH_CIPHER_NONE:
6574 		return SEC_LEVEL_0;
6575 	case IW_AUTH_CIPHER_WEP40:
6576 	case IW_AUTH_CIPHER_WEP104:
6577 		return SEC_LEVEL_1;
6578 	case IW_AUTH_CIPHER_TKIP:
6579 		return SEC_LEVEL_2;
6580 	case IW_AUTH_CIPHER_CCMP:
6581 		return SEC_LEVEL_3;
6582 	default:
6583 		return -1;
6584 	}
6585 }
6586 
6587 /* SIOCSIWAUTH */
6588 static int ipw_wx_set_auth(struct net_device *dev,
6589 			   struct iw_request_info *info,
6590 			   union iwreq_data *wrqu, char *extra)
6591 {
6592 	struct ipw_priv *priv = libipw_priv(dev);
6593 	struct libipw_device *ieee = priv->ieee;
6594 	struct iw_param *param = &wrqu->param;
6595 	struct lib80211_crypt_data *crypt;
6596 	unsigned long flags;
6597 	int ret = 0;
6598 
6599 	switch (param->flags & IW_AUTH_INDEX) {
6600 	case IW_AUTH_WPA_VERSION:
6601 		break;
6602 	case IW_AUTH_CIPHER_PAIRWISE:
6603 		ipw_set_hw_decrypt_unicast(priv,
6604 					   wext_cipher2level(param->value));
6605 		break;
6606 	case IW_AUTH_CIPHER_GROUP:
6607 		ipw_set_hw_decrypt_multicast(priv,
6608 					     wext_cipher2level(param->value));
6609 		break;
6610 	case IW_AUTH_KEY_MGMT:
6611 		/*
6612 		 * ipw2200 does not use these parameters
6613 		 */
6614 		break;
6615 
6616 	case IW_AUTH_TKIP_COUNTERMEASURES:
6617 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6618 		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6619 			break;
6620 
6621 		flags = crypt->ops->get_flags(crypt->priv);
6622 
6623 		if (param->value)
6624 			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6625 		else
6626 			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6627 
6628 		crypt->ops->set_flags(flags, crypt->priv);
6629 
6630 		break;
6631 
6632 	case IW_AUTH_DROP_UNENCRYPTED:{
6633 			/* HACK:
6634 			 *
6635 			 * wpa_supplicant calls set_wpa_enabled when the driver
6636 			 * is loaded and unloaded, regardless of whether WPA is
6637 			 * being used.  No other calls are made before association
6638 			 * is expected that indicate whether encryption will be
6639 			 * used.  If encryption is not being
6640 			 * used, drop_unencrypted is set to false, else true -- we
6641 			 * can use this to determine if the CAP_PRIVACY_ON bit should
6642 			 * be set.
6643 			 */
6644 			struct libipw_security sec = {
6645 				.flags = SEC_ENABLED,
6646 				.enabled = param->value,
6647 			};
6648 			priv->ieee->drop_unencrypted = param->value;
6649 			/* We only change SEC_LEVEL for open mode. Others
6650 			 * are set by ipw_wpa_set_encryption.
6651 			 */
6652 			if (!param->value) {
6653 				sec.flags |= SEC_LEVEL;
6654 				sec.level = SEC_LEVEL_0;
6655 			} else {
6656 				sec.flags |= SEC_LEVEL;
6657 				sec.level = SEC_LEVEL_1;
6658 			}
6659 			if (priv->ieee->set_security)
6660 				priv->ieee->set_security(priv->ieee->dev, &sec);
6661 			break;
6662 		}
6663 
6664 	case IW_AUTH_80211_AUTH_ALG:
6665 		ret = ipw_wpa_set_auth_algs(priv, param->value);
6666 		break;
6667 
6668 	case IW_AUTH_WPA_ENABLED:
6669 		ret = ipw_wpa_enable(priv, param->value);
6670 		ipw_disassociate(priv);
6671 		break;
6672 
6673 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6674 		ieee->ieee802_1x = param->value;
6675 		break;
6676 
6677 	case IW_AUTH_PRIVACY_INVOKED:
6678 		ieee->privacy_invoked = param->value;
6679 		break;
6680 
6681 	default:
6682 		return -EOPNOTSUPP;
6683 	}
6684 	return ret;
6685 }
6686 
6687 /* SIOCGIWAUTH */
6688 static int ipw_wx_get_auth(struct net_device *dev,
6689 			   struct iw_request_info *info,
6690 			   union iwreq_data *wrqu, char *extra)
6691 {
6692 	struct ipw_priv *priv = libipw_priv(dev);
6693 	struct libipw_device *ieee = priv->ieee;
6694 	struct lib80211_crypt_data *crypt;
6695 	struct iw_param *param = &wrqu->param;
6696 
6697 	switch (param->flags & IW_AUTH_INDEX) {
6698 	case IW_AUTH_WPA_VERSION:
6699 	case IW_AUTH_CIPHER_PAIRWISE:
6700 	case IW_AUTH_CIPHER_GROUP:
6701 	case IW_AUTH_KEY_MGMT:
6702 		/*
6703 		 * wpa_supplicant will control these internally
6704 		 */
6705 		return -EOPNOTSUPP;
6706 
6707 	case IW_AUTH_TKIP_COUNTERMEASURES:
6708 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6709 		if (!crypt || !crypt->ops->get_flags)
6710 			break;
6711 
6712 		param->value = (crypt->ops->get_flags(crypt->priv) &
6713 				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6714 
6715 		break;
6716 
6717 	case IW_AUTH_DROP_UNENCRYPTED:
6718 		param->value = ieee->drop_unencrypted;
6719 		break;
6720 
6721 	case IW_AUTH_80211_AUTH_ALG:
6722 		param->value = ieee->sec.auth_mode;
6723 		break;
6724 
6725 	case IW_AUTH_WPA_ENABLED:
6726 		param->value = ieee->wpa_enabled;
6727 		break;
6728 
6729 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6730 		param->value = ieee->ieee802_1x;
6731 		break;
6732 
6733 	case IW_AUTH_ROAMING_CONTROL:
6734 	case IW_AUTH_PRIVACY_INVOKED:
6735 		param->value = ieee->privacy_invoked;
6736 		break;
6737 
6738 	default:
6739 		return -EOPNOTSUPP;
6740 	}
6741 	return 0;
6742 }
6743 
6744 /* SIOCSIWENCODEEXT */
6745 static int ipw_wx_set_encodeext(struct net_device *dev,
6746 				struct iw_request_info *info,
6747 				union iwreq_data *wrqu, char *extra)
6748 {
6749 	struct ipw_priv *priv = libipw_priv(dev);
6750 	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6751 
6752 	if (hwcrypto) {
6753 		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6754 			/* IPW HW can't build TKIP MIC,
6755 			   host decryption still needed */
6756 			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6757 				priv->ieee->host_mc_decrypt = 1;
6758 			else {
6759 				priv->ieee->host_encrypt = 0;
6760 				priv->ieee->host_encrypt_msdu = 1;
6761 				priv->ieee->host_decrypt = 1;
6762 			}
6763 		} else {
6764 			priv->ieee->host_encrypt = 0;
6765 			priv->ieee->host_encrypt_msdu = 0;
6766 			priv->ieee->host_decrypt = 0;
6767 			priv->ieee->host_mc_decrypt = 0;
6768 		}
6769 	}
6770 
6771 	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6772 }
6773 
6774 /* SIOCGIWENCODEEXT */
6775 static int ipw_wx_get_encodeext(struct net_device *dev,
6776 				struct iw_request_info *info,
6777 				union iwreq_data *wrqu, char *extra)
6778 {
6779 	struct ipw_priv *priv = libipw_priv(dev);
6780 	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6781 }
6782 
6783 /* SIOCSIWMLME */
6784 static int ipw_wx_set_mlme(struct net_device *dev,
6785 			   struct iw_request_info *info,
6786 			   union iwreq_data *wrqu, char *extra)
6787 {
6788 	struct ipw_priv *priv = libipw_priv(dev);
6789 	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6790 
6791 	switch (mlme->cmd) {
6792 	case IW_MLME_DEAUTH:
6793 		/* silently ignore */
6794 		break;
6795 
6796 	case IW_MLME_DISASSOC:
6797 		ipw_disassociate(priv);
6798 		break;
6799 
6800 	default:
6801 		return -EOPNOTSUPP;
6802 	}
6803 	return 0;
6804 }
6805 
6806 #ifdef CONFIG_IPW2200_QOS
6807 
6808 /* QoS */
6809 /*
6810 * get the modulation type of the current network or
6811 * the card current mode
6812 * the card's current mode
6813 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6814 {
6815 	u8 mode = 0;
6816 
6817 	if (priv->status & STATUS_ASSOCIATED) {
6818 		unsigned long flags;
6819 
6820 		spin_lock_irqsave(&priv->ieee->lock, flags);
6821 		mode = priv->assoc_network->mode;
6822 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6823 	} else {
6824 		mode = priv->ieee->mode;
6825 	}
6826 	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6827 	return mode;
6828 }
6829 
6830 /*
6831 * Handle beacon and probe response management frames
6832 */
6833 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6834 					 int active_network,
6835 					 struct libipw_network *network)
6836 {
6837 	u32 size = sizeof(struct libipw_qos_parameters);
6838 
6839 	if (network->capability & WLAN_CAPABILITY_IBSS)
6840 		network->qos_data.active = network->qos_data.supported;
6841 
6842 	if (network->flags & NETWORK_HAS_QOS_MASK) {
6843 		if (active_network &&
6844 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6845 			network->qos_data.active = network->qos_data.supported;
6846 
6847 		if ((network->qos_data.active == 1) && (active_network == 1) &&
6848 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6849 		    (network->qos_data.old_param_count !=
6850 		     network->qos_data.param_count)) {
6851 			network->qos_data.old_param_count =
6852 			    network->qos_data.param_count;
6853 			schedule_work(&priv->qos_activate);
6854 			IPW_DEBUG_QOS("QoS parameters change call "
6855 				      "qos_activate\n");
6856 		}
6857 	} else {
6858 		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6859 			memcpy(&network->qos_data.parameters,
6860 			       &def_parameters_CCK, size);
6861 		else
6862 			memcpy(&network->qos_data.parameters,
6863 			       &def_parameters_OFDM, size);
6864 
6865 		if ((network->qos_data.active == 1) && (active_network == 1)) {
6866 			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6867 			schedule_work(&priv->qos_activate);
6868 		}
6869 
6870 		network->qos_data.active = 0;
6871 		network->qos_data.supported = 0;
6872 	}
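	/* In ad-hoc mode, a beacon/probe response from a different BSSID
	 * that advertises IBSS capability and our ESSID indicates the cell
	 * has split; schedule a merge with that network. */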
6873 	if ((priv->status & STATUS_ASSOCIATED) &&
6874 	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6875 		if (!ether_addr_equal(network->bssid, priv->bssid))
6876 			if (network->capability & WLAN_CAPABILITY_IBSS)
6877 				if ((network->ssid_len ==
6878 				     priv->assoc_network->ssid_len) &&
6879 				    !memcmp(network->ssid,
6880 					    priv->assoc_network->ssid,
6881 					    network->ssid_len)) {
6882 					schedule_work(&priv->merge_networks);
6883 				}
6884 	}
6885 
6886 	return 0;
6887 }
6888 
6889 /*
6890 * This function sets up the firmware to support QoS. It sends
6891 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6892 */
6893 static int ipw_qos_activate(struct ipw_priv *priv,
6894 			    struct libipw_qos_data *qos_network_data)
6895 {
6896 	int err;
6897 	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6898 	struct libipw_qos_parameters *active_one = NULL;
6899 	u32 size = sizeof(struct libipw_qos_parameters);
6900 	u32 burst_duration;
6901 	int i;
6902 	u8 type;
6903 
6904 	type = ipw_qos_current_mode(priv);
6905 
6906 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6907 	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6908 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6909 	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6910 
6911 	if (qos_network_data == NULL) {
6912 		if (type == IEEE_B) {
6913 			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6914 			active_one = &def_parameters_CCK;
6915 		} else
6916 			active_one = &def_parameters_OFDM;
6917 
6918 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6919 		burst_duration = ipw_qos_get_burst_duration(priv);
6920 		for (i = 0; i < QOS_QUEUE_NUM; i++)
6921 			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6922 			    cpu_to_le16(burst_duration);
6923 	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6924 		if (type == IEEE_B) {
6925 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6926 				      type);
6927 			if (priv->qos_data.qos_enable == 0)
6928 				active_one = &def_parameters_CCK;
6929 			else
6930 				active_one = priv->qos_data.def_qos_parm_CCK;
6931 		} else {
6932 			if (priv->qos_data.qos_enable == 0)
6933 				active_one = &def_parameters_OFDM;
6934 			else
6935 				active_one = priv->qos_data.def_qos_parm_OFDM;
6936 		}
6937 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6938 	} else {
6939 		unsigned long flags;
6940 		int active;
6941 
6942 		spin_lock_irqsave(&priv->ieee->lock, flags);
6943 		active_one = &(qos_network_data->parameters);
6944 		qos_network_data->old_param_count =
6945 		    qos_network_data->param_count;
6946 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6947 		active = qos_network_data->supported;
6948 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6949 
6950 		if (active == 0) {
6951 			burst_duration = ipw_qos_get_burst_duration(priv);
6952 			for (i = 0; i < QOS_QUEUE_NUM; i++)
6953 				qos_parameters[QOS_PARAM_SET_ACTIVE].
6954 				    tx_op_limit[i] = cpu_to_le16(burst_duration);
6955 		}
6956 	}
6957 
6958 	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6959 	err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
6960 	if (err)
6961 		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6962 
6963 	return err;
6964 }
6965 
6966 /*
6967 * send IPW_CMD_WME_INFO to the firmware
6968 */
6969 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6970 {
6971 	int ret = 0;
6972 	struct libipw_qos_information_element qos_info;
6973 
6974 	if (priv == NULL)
6975 		return -1;
6976 
6977 	qos_info.elementID = QOS_ELEMENT_ID;
6978 	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
6979 
6980 	qos_info.version = QOS_VERSION_1;
6981 	qos_info.ac_info = 0;
6982 
6983 	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6984 	qos_info.qui_type = QOS_OUI_TYPE;
6985 	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6986 
6987 	ret = ipw_send_qos_info_command(priv, &qos_info);
6988 	if (ret != 0) {
6989 		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6990 	}
6991 	return ret;
6992 }
6993 
6994 /*
6995 * Set up the QoS parameters for the association request
6996 */
6997 static int ipw_qos_association(struct ipw_priv *priv,
6998 			       struct libipw_network *network)
6999 {
7000 	int err = 0;
7001 	struct libipw_qos_data *qos_data = NULL;
7002 	struct libipw_qos_data ibss_data = {
7003 		.supported = 1,
7004 		.active = 1,
7005 	};
7006 
7007 	switch (priv->ieee->iw_mode) {
7008 	case IW_MODE_ADHOC:
7009 		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7010 
7011 		qos_data = &ibss_data;
7012 		break;
7013 
7014 	case IW_MODE_INFRA:
7015 		qos_data = &network->qos_data;
7016 		break;
7017 
7018 	default:
7019 		BUG();
7020 		break;
7021 	}
7022 
7023 	err = ipw_qos_activate(priv, qos_data);
7024 	if (err) {
7025 		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7026 		return err;
7027 	}
7028 
7029 	if (priv->qos_data.qos_enable && qos_data->supported) {
7030 		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7031 		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7032 		return ipw_qos_set_info_element(priv);
7033 	}
7034 
7035 	return 0;
7036 }
7037 
7038 /*
7039 * Handle the association response.  If the network's QoS settings
7040 * differ from the settings stored at association time, adjust the
7041 * QoS settings.
7042 */
7043 static void ipw_qos_association_resp(struct ipw_priv *priv,
7044 				    struct libipw_network *network)
7045 {
7046 	unsigned long flags;
7047 	u32 size = sizeof(struct libipw_qos_parameters);
7048 	int set_qos_param = 0;
7049 
7050 	if ((priv == NULL) || (network == NULL) ||
7051 	    (priv->assoc_network == NULL))
7052 		return;
7053 
7054 	if (!(priv->status & STATUS_ASSOCIATED))
7055 		return;
7056 
7057 	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7058 		return;
7059 
7060 	spin_lock_irqsave(&priv->ieee->lock, flags);
7061 	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7062 		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7063 		       sizeof(struct libipw_qos_data));
7064 		priv->assoc_network->qos_data.active = 1;
7065 		if ((network->qos_data.old_param_count !=
7066 		     network->qos_data.param_count)) {
7067 			set_qos_param = 1;
7068 			network->qos_data.old_param_count =
7069 			    network->qos_data.param_count;
7070 		}
7071 
7072 	} else {
7073 		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7074 			memcpy(&priv->assoc_network->qos_data.parameters,
7075 			       &def_parameters_CCK, size);
7076 		else
7077 			memcpy(&priv->assoc_network->qos_data.parameters,
7078 			       &def_parameters_OFDM, size);
7079 		priv->assoc_network->qos_data.active = 0;
7080 		priv->assoc_network->qos_data.supported = 0;
7081 		set_qos_param = 1;
7082 	}
7083 
7084 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7085 
7086 	if (set_qos_param == 1)
7087 		schedule_work(&priv->qos_activate);
7088 }
7089 
7090 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7091 {
7092 	u32 ret = 0;
7093 
7094 	if (!priv)
7095 		return 0;
7096 
7097 	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7098 		ret = priv->qos_data.burst_duration_CCK;
7099 	else
7100 		ret = priv->qos_data.burst_duration_OFDM;
7101 
7102 	return ret;
7103 }
7104 
7105 /*
7106 * Initialize the global QoS settings
7107 */
7108 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7109 			 int burst_enable, u32 burst_duration_CCK,
7110 			 u32 burst_duration_OFDM)
7111 {
7112 	priv->qos_data.qos_enable = enable;
7113 
7114 	if (priv->qos_data.qos_enable) {
7115 		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7116 		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7117 		IPW_DEBUG_QOS("QoS is enabled\n");
7118 	} else {
7119 		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7120 		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7121 		IPW_DEBUG_QOS("QoS is not enabled\n");
7122 	}
7123 
7124 	priv->qos_data.burst_enable = burst_enable;
7125 
7126 	if (burst_enable) {
7127 		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7128 		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7129 	} else {
7130 		priv->qos_data.burst_duration_CCK = 0;
7131 		priv->qos_data.burst_duration_OFDM = 0;
7132 	}
7133 }
7134 
7135 /*
7136 * map the packet priority to the right TX Queue
7137 */
7138 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7139 {
7140 	if (priority > 7 || !priv->qos_data.qos_enable)
7141 		priority = 0;
7142 
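	/* from_priority_to_tx_queue[] appears to yield a 1-based firmware
	 * queue number, hence the -1 for a 0-based TX queue index. */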
7143 	return from_priority_to_tx_queue[priority] - 1;
7144 }
7145 
7146 static int ipw_is_qos_active(struct net_device *dev,
7147 			     struct sk_buff *skb)
7148 {
7149 	struct ipw_priv *priv = libipw_priv(dev);
7150 	struct libipw_qos_data *qos_data = NULL;
7151 	int active, supported;
7152 	u8 *daddr = skb->data + ETH_ALEN;
7153 	int unicast = !is_multicast_ether_addr(daddr);
7154 
7155 	if (!(priv->status & STATUS_ASSOCIATED))
7156 		return 0;
7157 
7158 	qos_data = &priv->assoc_network->qos_data;
7159 
7160 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7161 		if (unicast == 0)
7162 			qos_data->active = 0;
7163 		else
7164 			qos_data->active = qos_data->supported;
7165 	}
7166 	active = qos_data->active;
7167 	supported = qos_data->supported;
7168 	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7169 		      "unicast %d\n",
7170 		      priv->qos_data.qos_enable, active, supported, unicast);
7171 	if (active && priv->qos_data.qos_enable)
7172 		return 1;
7173 
7174 	return 0;
7175 
7176 }
7177 /*
7178 * add QoS parameter to the TX command
7179 */
7180 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7181 					u16 priority,
7182 					struct tfd_data *tfd)
7183 {
7184 	int tx_queue_id = 0;
7185 
7186 
7187 	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7188 	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7189 
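	/* Queues flagged in qos_no_ack_mask carry no-ACK traffic: clear the
	 * ACK-required flag and mark the 802.11 QoS control field. */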
7190 	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7191 		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7192 		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7193 	}
7194 	return 0;
7195 }
7196 
7197 /*
7198 * Background work handler that runs the QoS activation
7199 */
7200 static void ipw_bg_qos_activate(struct work_struct *work)
7201 {
7202 	struct ipw_priv *priv =
7203 		container_of(work, struct ipw_priv, qos_activate);
7204 
7205 	mutex_lock(&priv->mutex);
7206 
7207 	if (priv->status & STATUS_ASSOCIATED)
7208 		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7209 
7210 	mutex_unlock(&priv->mutex);
7211 }
7212 
7213 static int ipw_handle_probe_response(struct net_device *dev,
7214 				     struct libipw_probe_response *resp,
7215 				     struct libipw_network *network)
7216 {
7217 	struct ipw_priv *priv = libipw_priv(dev);
7218 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7219 			      (network == priv->assoc_network));
7220 
7221 	ipw_qos_handle_probe_response(priv, active_network, network);
7222 
7223 	return 0;
7224 }
7225 
7226 static int ipw_handle_beacon(struct net_device *dev,
7227 			     struct libipw_beacon *resp,
7228 			     struct libipw_network *network)
7229 {
7230 	struct ipw_priv *priv = libipw_priv(dev);
7231 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7232 			      (network == priv->assoc_network));
7233 
7234 	ipw_qos_handle_probe_response(priv, active_network, network);
7235 
7236 	return 0;
7237 }
7238 
7239 static int ipw_handle_assoc_response(struct net_device *dev,
7240 				     struct libipw_assoc_response *resp,
7241 				     struct libipw_network *network)
7242 {
7243 	struct ipw_priv *priv = libipw_priv(dev);
7244 	ipw_qos_association_resp(priv, network);
7245 	return 0;
7246 }
7247 
7248 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7249 				       *qos_param)
7250 {
7251 	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7252 				sizeof(*qos_param) * 3, qos_param);
7253 }
7254 
7255 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7256 				     *qos_param)
7257 {
7258 	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7259 				qos_param);
7260 }
7261 
7262 #endif				/* CONFIG_IPW2200_QOS */
7263 
7264 static int ipw_associate_network(struct ipw_priv *priv,
7265 				 struct libipw_network *network,
7266 				 struct ipw_supported_rates *rates, int roaming)
7267 {
7268 	int err;
7269 
7270 	if (priv->config & CFG_FIXED_RATE)
7271 		ipw_set_fixed_rate(priv, network->mode);
7272 
7273 	if (!(priv->config & CFG_STATIC_ESSID)) {
7274 		priv->essid_len = min(network->ssid_len,
7275 				      (u8) IW_ESSID_MAX_SIZE);
7276 		memcpy(priv->essid, network->ssid, priv->essid_len);
7277 	}
7278 
7279 	network->last_associate = jiffies;
7280 
7281 	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7282 	priv->assoc_request.channel = network->channel;
7283 	priv->assoc_request.auth_key = 0;
7284 
7285 	if ((priv->capability & CAP_PRIVACY_ON) &&
7286 	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7287 		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7288 		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7289 
7290 		if (priv->ieee->sec.level == SEC_LEVEL_1)
7291 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7292 
7293 	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7294 		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7295 		priv->assoc_request.auth_type = AUTH_LEAP;
7296 	else
7297 		priv->assoc_request.auth_type = AUTH_OPEN;
7298 
7299 	if (priv->ieee->wpa_ie_len) {
7300 		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7301 		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7302 				 priv->ieee->wpa_ie_len);
7303 	}
7304 
7305 	/*
7306 	 * It is valid for our ieee device to support multiple modes, but
7307 	 * when it comes to associating to a given network we have to choose
7308 	 * just one mode.
7309 	 */
7310 	if (network->mode & priv->ieee->mode & IEEE_A)
7311 		priv->assoc_request.ieee_mode = IPW_A_MODE;
7312 	else if (network->mode & priv->ieee->mode & IEEE_G)
7313 		priv->assoc_request.ieee_mode = IPW_G_MODE;
7314 	else if (network->mode & priv->ieee->mode & IEEE_B)
7315 		priv->assoc_request.ieee_mode = IPW_B_MODE;
7316 
7317 	priv->assoc_request.capability = cpu_to_le16(network->capability);
7318 	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7319 	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7320 		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7321 	} else {
7322 		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7323 
7324 		/* Clear the short preamble if we won't be supporting it */
7325 		priv->assoc_request.capability &=
7326 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7327 	}
7328 
7329 	/* Clear capability bits that aren't used in Ad Hoc */
7330 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7331 		priv->assoc_request.capability &=
7332 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7333 
7334 	IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7335 			roaming ? "Rea" : "A",
7336 			priv->essid_len, priv->essid,
7337 			network->channel,
7338 			ipw_modes[priv->assoc_request.ieee_mode],
7339 			rates->num_rates,
7340 			(priv->assoc_request.preamble_length ==
7341 			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7342 			network->capability &
7343 			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7344 			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7345 			priv->capability & CAP_PRIVACY_ON ?
7346 			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7347 			 "(open)") : "",
7348 			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7349 			priv->capability & CAP_PRIVACY_ON ?
7350 			'1' + priv->ieee->sec.active_key : '.',
7351 			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7352 
7353 	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
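	/* In ad-hoc mode a zero TSF means we are creating the IBSS ourselves;
	 * otherwise (re)associate and adopt the network's TSF. */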
7354 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7355 	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7356 		priv->assoc_request.assoc_type = HC_IBSS_START;
7357 		priv->assoc_request.assoc_tsf_msw = 0;
7358 		priv->assoc_request.assoc_tsf_lsw = 0;
7359 	} else {
7360 		if (unlikely(roaming))
7361 			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7362 		else
7363 			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7364 		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7365 		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7366 	}
7367 
7368 	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7369 
7370 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7371 		eth_broadcast_addr(priv->assoc_request.dest);
7372 		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7373 	} else {
7374 		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7375 		priv->assoc_request.atim_window = 0;
7376 	}
7377 
7378 	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7379 
7380 	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7381 	if (err) {
7382 		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7383 		return err;
7384 	}
7385 
7386 	rates->ieee_mode = priv->assoc_request.ieee_mode;
7387 	rates->purpose = IPW_RATE_CONNECT;
7388 	ipw_send_supported_rates(priv, rates);
7389 
7390 	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7391 		priv->sys_config.dot11g_auto_detection = 1;
7392 	else
7393 		priv->sys_config.dot11g_auto_detection = 0;
7394 
7395 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7396 		priv->sys_config.answer_broadcast_ssid_probe = 1;
7397 	else
7398 		priv->sys_config.answer_broadcast_ssid_probe = 0;
7399 
7400 	err = ipw_send_system_config(priv);
7401 	if (err) {
7402 		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7403 		return err;
7404 	}
7405 
7406 	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7407 	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7408 	if (err) {
7409 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7410 		return err;
7411 	}
7412 
7413 	/*
7414 	 * If preemption is enabled, it is possible for the association
7415 	 * to complete before we return from ipw_send_associate.  Therefore
7416 	 * we have to be sure to update our private data first.
7417 	 */
7418 	priv->channel = network->channel;
7419 	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7420 	priv->status |= STATUS_ASSOCIATING;
7421 	priv->status &= ~STATUS_SECURITY_UPDATED;
7422 
7423 	priv->assoc_network = network;
7424 
7425 #ifdef CONFIG_IPW2200_QOS
7426 	ipw_qos_association(priv, network);
7427 #endif
7428 
7429 	err = ipw_send_associate(priv, &priv->assoc_request);
7430 	if (err) {
7431 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7432 		return err;
7433 	}
7434 
7435 	IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
7436 		  priv->essid_len, priv->essid, priv->bssid);
7437 
7438 	return 0;
7439 }
7440 
7441 static void ipw_roam(void *data)
7442 {
7443 	struct ipw_priv *priv = data;
7444 	struct libipw_network *network = NULL;
7445 	struct ipw_network_match match = {
7446 		.network = priv->assoc_network
7447 	};
7448 
7449 	/* The roaming process is as follows:
7450 	 *
7451 	 * 1.  Missed beacon threshold triggers the roaming process by
7452 	 *     setting the status ROAM bit and requesting a scan.
7453 	 * 2.  When the scan completes, it schedules the ROAM work
7454 	 * 3.  The ROAM work looks at all of the known networks for one that
7455 	 *     is a better network than the one currently associated.  If none
7456 	 *     is found, the ROAM process is over (ROAM bit cleared).
7457 	 * 4.  If a better network is found, a disassociation request is
7458 	 *     sent.
7459 	 * 5.  When the disassociation completes, the roam work is again
7460 	 *     scheduled.  The second time through, the driver is no longer
7461 	 *     associated, and the newly selected network is sent an
7462 	 *     association request.
7463 	 * 6.  At this point, the roaming process is complete and the ROAM
7464 	 *     status bit is cleared.
7465 	 */
7466 
7467 	/* If we are no longer associated, and the roaming bit is no longer
7468 	 * set, then we are not actively roaming, so just return */
7469 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7470 		return;
7471 
7472 	if (priv->status & STATUS_ASSOCIATED) {
7473 		/* First pass through ROAM process -- look for a better
7474 		 * network */
7475 		unsigned long flags;
7476 		u8 rssi = priv->assoc_network->stats.rssi;
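		/* Temporarily report the worst possible RSSI for the current
		 * network so ipw_best_network() will prefer any other AP. */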
7477 		priv->assoc_network->stats.rssi = -128;
7478 		spin_lock_irqsave(&priv->ieee->lock, flags);
7479 		list_for_each_entry(network, &priv->ieee->network_list, list) {
7480 			if (network != priv->assoc_network)
7481 				ipw_best_network(priv, &match, network, 1);
7482 		}
7483 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7484 		priv->assoc_network->stats.rssi = rssi;
7485 
7486 		if (match.network == priv->assoc_network) {
7487 			IPW_DEBUG_ASSOC("No better APs in this network to "
7488 					"roam to.\n");
7489 			priv->status &= ~STATUS_ROAMING;
7490 			ipw_debug_config(priv);
7491 			return;
7492 		}
7493 
7494 		ipw_send_disassociate(priv, 1);
7495 		priv->assoc_network = match.network;
7496 
7497 		return;
7498 	}
7499 
7500 	/* Second pass through ROAM process -- request association */
7501 	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7502 	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7503 	priv->status &= ~STATUS_ROAMING;
7504 }
7505 
7506 static void ipw_bg_roam(struct work_struct *work)
7507 {
7508 	struct ipw_priv *priv =
7509 		container_of(work, struct ipw_priv, roam);
7510 	mutex_lock(&priv->mutex);
7511 	ipw_roam(priv);
7512 	mutex_unlock(&priv->mutex);
7513 }
7514 
7515 static int ipw_associate(void *data)
7516 {
7517 	struct ipw_priv *priv = data;
7518 
7519 	struct libipw_network *network = NULL;
7520 	struct ipw_network_match match = {
7521 		.network = NULL
7522 	};
7523 	struct ipw_supported_rates *rates;
7524 	struct list_head *element;
7525 	unsigned long flags;
7526 
7527 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7528 		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7529 		return 0;
7530 	}
7531 
7532 	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7533 		IPW_DEBUG_ASSOC("Not attempting association (already in "
7534 				"progress)\n");
7535 		return 0;
7536 	}
7537 
7538 	if (priv->status & STATUS_DISASSOCIATING) {
7539 		IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n");
7540 		schedule_work(&priv->associate);
7541 		return 0;
7542 	}
7543 
7544 	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7545 		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7546 				"initialized)\n");
7547 		return 0;
7548 	}
7549 
7550 	if (!(priv->config & CFG_ASSOCIATE) &&
7551 	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7552 		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7553 		return 0;
7554 	}
7555 
7556 	/* Protect our use of the network_list */
7557 	spin_lock_irqsave(&priv->ieee->lock, flags);
7558 	list_for_each_entry(network, &priv->ieee->network_list, list)
7559 	    ipw_best_network(priv, &match, network, 0);
7560 
7561 	network = match.network;
7562 	rates = &match.rates;
7563 
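	/* Nothing matched: if ad-hoc creation is allowed and both ESSID and
	 * channel are statically configured, create our own IBSS. */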
7564 	if (network == NULL &&
7565 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7566 	    priv->config & CFG_ADHOC_CREATE &&
7567 	    priv->config & CFG_STATIC_ESSID &&
7568 	    priv->config & CFG_STATIC_CHANNEL) {
7569 		/* Use oldest network if the free list is empty */
7570 		if (list_empty(&priv->ieee->network_free_list)) {
7571 			struct libipw_network *oldest = NULL;
7572 			struct libipw_network *target;
7573 
7574 			list_for_each_entry(target, &priv->ieee->network_list, list) {
7575 				if ((oldest == NULL) ||
7576 				    (target->last_scanned < oldest->last_scanned))
7577 					oldest = target;
7578 			}
7579 
7580 			/* If there are no more slots, expire the oldest */
7581 			list_del(&oldest->list);
7582 			target = oldest;
7583 			IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
7584 					target->ssid_len, target->ssid,
7585 					target->bssid);
7586 			list_add_tail(&target->list,
7587 				      &priv->ieee->network_free_list);
7588 		}
7589 
7590 		element = priv->ieee->network_free_list.next;
7591 		network = list_entry(element, struct libipw_network, list);
7592 		ipw_adhoc_create(priv, network);
7593 		rates = &priv->rates;
7594 		list_del(element);
7595 		list_add_tail(&network->list, &priv->ieee->network_list);
7596 	}
7597 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7598 
7599 	/* If we reached the end of the list, then we don't have any valid
7600 	 * matching APs */
7601 	if (!network) {
7602 		ipw_debug_config(priv);
7603 
7604 		if (!(priv->status & STATUS_SCANNING)) {
7605 			if (!(priv->config & CFG_SPEED_SCAN))
7606 				schedule_delayed_work(&priv->request_scan,
7607 						      SCAN_INTERVAL);
7608 			else
7609 				schedule_delayed_work(&priv->request_scan, 0);
7610 		}
7611 
7612 		return 0;
7613 	}
7614 
7615 	ipw_associate_network(priv, network, rates, 0);
7616 
7617 	return 1;
7618 }
7619 
7620 static void ipw_bg_associate(struct work_struct *work)
7621 {
7622 	struct ipw_priv *priv =
7623 		container_of(work, struct ipw_priv, associate);
7624 	mutex_lock(&priv->mutex);
7625 	ipw_associate(priv);
7626 	mutex_unlock(&priv->mutex);
7627 }
7628 
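/*
 * Hardware decryption leaves the PROTECTED bit set and the security
 * header/trailer in place; clear the bit and strip the CCMP or WEP
 * overhead so the frame looks like plaintext to the stack.
 */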
7629 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7630 				      struct sk_buff *skb)
7631 {
7632 	struct ieee80211_hdr *hdr;
7633 	u16 fc;
7634 
7635 	hdr = (struct ieee80211_hdr *)skb->data;
7636 	fc = le16_to_cpu(hdr->frame_control);
7637 	if (!(fc & IEEE80211_FCTL_PROTECTED))
7638 		return;
7639 
7640 	fc &= ~IEEE80211_FCTL_PROTECTED;
7641 	hdr->frame_control = cpu_to_le16(fc);
7642 	switch (priv->ieee->sec.level) {
7643 	case SEC_LEVEL_3:
7644 		/* Remove CCMP HDR */
7645 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7646 			skb->data + LIBIPW_3ADDR_LEN + 8,
7647 			skb->len - LIBIPW_3ADDR_LEN - 8);
7648 		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7649 		break;
7650 	case SEC_LEVEL_2:
7651 		break;
7652 	case SEC_LEVEL_1:
7653 		/* Remove IV */
7654 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7655 			skb->data + LIBIPW_3ADDR_LEN + 4,
7656 			skb->len - LIBIPW_3ADDR_LEN - 4);
7657 		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7658 		break;
7659 	case SEC_LEVEL_0:
7660 		break;
7661 	default:
7662 		printk(KERN_ERR "Unknown security level %d\n",
7663 		       priv->ieee->sec.level);
7664 		break;
7665 	}
7666 }
7667 
7668 static void ipw_handle_data_packet(struct ipw_priv *priv,
7669 				   struct ipw_rx_mem_buffer *rxb,
7670 				   struct libipw_rx_stats *stats)
7671 {
7672 	struct net_device *dev = priv->net_dev;
7673 	struct libipw_hdr_4addr *hdr;
7674 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7675 
7676 	/* We received data from the HW, so stop the watchdog */
7677 	netif_trans_update(dev);
7678 
7679 	/* We only process data packets if the
7680 	 * interface is open */
7681 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7682 		     skb_tailroom(rxb->skb))) {
7683 		dev->stats.rx_errors++;
7684 		priv->wstats.discard.misc++;
7685 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7686 		return;
7687 	} else if (unlikely(!netif_running(priv->net_dev))) {
7688 		dev->stats.rx_dropped++;
7689 		priv->wstats.discard.misc++;
7690 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7691 		return;
7692 	}
7693 
7694 	/* Advance skb->data to the start of the actual payload */
7695 	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7696 
7697 	/* Set the size of the skb to the size of the frame */
7698 	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7699 
7700 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7701 
7702 	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7703 	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7704 	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7705 	    (is_multicast_ether_addr(hdr->addr1) ?
7706 	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7707 		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7708 
7709 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7710 		dev->stats.rx_errors++;
7711 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7712 		rxb->skb = NULL;
7713 		__ipw_led_activity_on(priv);
7714 	}
7715 }
7716 
7717 #ifdef CONFIG_IPW2200_RADIOTAP
7718 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7719 					   struct ipw_rx_mem_buffer *rxb,
7720 					   struct libipw_rx_stats *stats)
7721 {
7722 	struct net_device *dev = priv->net_dev;
7723 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7724 	struct ipw_rx_frame *frame = &pkt->u.frame;
7725 
7726 	/* initial pull of some data */
7727 	u16 received_channel = frame->received_channel;
7728 	u8 antennaAndPhy = frame->antennaAndPhy;
7729 	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7730 	u16 pktrate = frame->rate;
7731 
7732 	/* Magic struct that slots into the radiotap header -- no reason
7733 	 * to build this manually element by element, we can write it much
7734 	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7735 	struct ipw_rt_hdr *ipw_rt;
7736 
7737 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7738 
7739 	/* We received data from the HW, so stop the watchdog */
7740 	netif_trans_update(dev);
7741 
7742 	/* We only process data packets if the
7743 	 * interface is open */
7744 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7745 		     skb_tailroom(rxb->skb))) {
7746 		dev->stats.rx_errors++;
7747 		priv->wstats.discard.misc++;
7748 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7749 		return;
7750 	} else if (unlikely(!netif_running(priv->net_dev))) {
7751 		dev->stats.rx_dropped++;
7752 		priv->wstats.discard.misc++;
7753 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7754 		return;
7755 	}
7756 
7757 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7758 	 * that now */
7759 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7760 		/* FIXME: Should alloc bigger skb instead */
7761 		dev->stats.rx_dropped++;
7762 		priv->wstats.discard.misc++;
7763 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7764 		return;
7765 	}
7766 
7767 	/* copy the frame itself */
7768 	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7769 		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7770 
7771 	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7772 
7773 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7774 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7775 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */
7776 
7777 	/* Big bitfield of all the fields we provide in radiotap */
7778 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7779 	     (1 << IEEE80211_RADIOTAP_TSFT) |
7780 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7781 	     (1 << IEEE80211_RADIOTAP_RATE) |
7782 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7783 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7784 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7785 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7786 
7787 	/* Zero the flags, we'll add to them as we go */
7788 	ipw_rt->rt_flags = 0;
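	/* Only the low 32 bits of the parent TSF are available from the
	 * firmware; assemble the four bytes (LSB first) here. */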
7789 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7790 			       frame->parent_tsf[2] << 16 |
7791 			       frame->parent_tsf[1] << 8  |
7792 			       frame->parent_tsf[0]);
7793 
7794 	/* Convert signal to DBM */
7795 	ipw_rt->rt_dbmsignal = antsignal;
7796 	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7797 
7798 	/* Convert the channel data and set the flags */
7799 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7800 	if (received_channel > 14) {	/* 802.11a */
7801 		ipw_rt->rt_chbitmask =
7802 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7803 	} else if (antennaAndPhy & 32) {	/* 802.11b */
7804 		ipw_rt->rt_chbitmask =
7805 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7806 	} else {		/* 802.11g */
7807 		ipw_rt->rt_chbitmask =
7808 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7809 	}
7810 
7811 	/* set the rate in multiples of 500k/s */
7812 	switch (pktrate) {
7813 	case IPW_TX_RATE_1MB:
7814 		ipw_rt->rt_rate = 2;
7815 		break;
7816 	case IPW_TX_RATE_2MB:
7817 		ipw_rt->rt_rate = 4;
7818 		break;
7819 	case IPW_TX_RATE_5MB:
7820 		ipw_rt->rt_rate = 10;
7821 		break;
7822 	case IPW_TX_RATE_6MB:
7823 		ipw_rt->rt_rate = 12;
7824 		break;
7825 	case IPW_TX_RATE_9MB:
7826 		ipw_rt->rt_rate = 18;
7827 		break;
7828 	case IPW_TX_RATE_11MB:
7829 		ipw_rt->rt_rate = 22;
7830 		break;
7831 	case IPW_TX_RATE_12MB:
7832 		ipw_rt->rt_rate = 24;
7833 		break;
7834 	case IPW_TX_RATE_18MB:
7835 		ipw_rt->rt_rate = 36;
7836 		break;
7837 	case IPW_TX_RATE_24MB:
7838 		ipw_rt->rt_rate = 48;
7839 		break;
7840 	case IPW_TX_RATE_36MB:
7841 		ipw_rt->rt_rate = 72;
7842 		break;
7843 	case IPW_TX_RATE_48MB:
7844 		ipw_rt->rt_rate = 96;
7845 		break;
7846 	case IPW_TX_RATE_54MB:
7847 		ipw_rt->rt_rate = 108;
7848 		break;
7849 	default:
7850 		ipw_rt->rt_rate = 0;
7851 		break;
7852 	}
7853 
7854 	/* antenna number */
7855 	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7856 
7857 	/* set the preamble flag if we have it */
7858 	if ((antennaAndPhy & 64))
7859 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7860 
7861 	/* Set the size of the skb to the size of the frame */
7862 	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7863 
7864 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7865 
7866 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7867 		dev->stats.rx_errors++;
7868 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7869 		rxb->skb = NULL;
7870 		/* no LED during capture */
7871 	}
7872 }
7873 #endif
7874 
7875 #ifdef CONFIG_IPW2200_PROMISCUOUS
7876 #define libipw_is_probe_response(fc) \
7877    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7878     (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7879 
7880 #define libipw_is_management(fc) \
7881    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7882 
7883 #define libipw_is_control(fc) \
7884    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7885 
7886 #define libipw_is_data(fc) \
7887    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7888 
7889 #define libipw_is_assoc_request(fc) \
7890    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7891 
7892 #define libipw_is_reassoc_request(fc) \
7893    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7894 
7895 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7896 				      struct ipw_rx_mem_buffer *rxb,
7897 				      struct libipw_rx_stats *stats)
7898 {
7899 	struct net_device *dev = priv->prom_net_dev;
7900 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7901 	struct ipw_rx_frame *frame = &pkt->u.frame;
7902 	struct ipw_rt_hdr *ipw_rt;
7903 
7904 	/* First cache any information we need before we overwrite
7905 	 * the information provided in the skb from the hardware */
7906 	struct ieee80211_hdr *hdr;
7907 	u16 channel = frame->received_channel;
7908 	u8 phy_flags = frame->antennaAndPhy;
7909 	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7910 	s8 noise = (s8) le16_to_cpu(frame->noise);
7911 	u8 rate = frame->rate;
7912 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7913 	struct sk_buff *skb;
7914 	int hdr_only = 0;
7915 	u16 filter = priv->prom_priv->filter;
7916 
7917 	/* If the filter is set to not include Rx frames then return */
7918 	if (filter & IPW_PROM_NO_RX)
7919 		return;
7920 
7921 	/* We received data from the HW, so stop the watchdog */
7922 	netif_trans_update(dev);
7923 
7924 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7925 		dev->stats.rx_errors++;
7926 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7927 		return;
7928 	}
7929 
7930 	/* We only process data packets if the interface is open */
7931 	if (unlikely(!netif_running(dev))) {
7932 		dev->stats.rx_dropped++;
7933 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7934 		return;
7935 	}
7936 
7937 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7938 	 * that now */
7939 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7940 		/* FIXME: Should alloc bigger skb instead */
7941 		dev->stats.rx_dropped++;
7942 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7943 		return;
7944 	}
7945 
7946 	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7947 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7948 		if (filter & IPW_PROM_NO_MGMT)
7949 			return;
7950 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7951 			hdr_only = 1;
7952 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7953 		if (filter & IPW_PROM_NO_CTL)
7954 			return;
7955 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
7956 			hdr_only = 1;
7957 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7958 		if (filter & IPW_PROM_NO_DATA)
7959 			return;
7960 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
7961 			hdr_only = 1;
7962 	}
7963 
7964 	/* Copy the SKB since this is for the promiscuous side */
7965 	skb = skb_copy(rxb->skb, GFP_ATOMIC);
7966 	if (skb == NULL) {
7967 		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
7968 		return;
7969 	}
7970 
7971 	/* copy the frame data to write after where the radiotap header goes */
7972 	ipw_rt = (void *)skb->data;
7973 
7974 	if (hdr_only)
7975 		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
7976 
7977 	memcpy(ipw_rt->payload, hdr, len);
7978 
7979 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7980 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7981 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
7982 
7983 	/* Set the size of the skb to the size of the frame */
7984 	skb_put(skb, sizeof(*ipw_rt) + len);
7985 
7986 	/* Big bitfield of all the fields we provide in radiotap */
7987 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7988 	     (1 << IEEE80211_RADIOTAP_TSFT) |
7989 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7990 	     (1 << IEEE80211_RADIOTAP_RATE) |
7991 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7992 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7993 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7994 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7995 
7996 	/* Zero the flags, we'll add to them as we go */
7997 	ipw_rt->rt_flags = 0;
7998 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7999 			       frame->parent_tsf[2] << 16 |
8000 			       frame->parent_tsf[1] << 8  |
8001 			       frame->parent_tsf[0]);
8002 
8003 	/* Convert to DBM */
8004 	ipw_rt->rt_dbmsignal = signal;
8005 	ipw_rt->rt_dbmnoise = noise;
8006 
8007 	/* Convert the channel data and set the flags */
8008 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8009 	if (channel > 14) {	/* 802.11a */
8010 		ipw_rt->rt_chbitmask =
8011 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8012 	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8013 		ipw_rt->rt_chbitmask =
8014 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8015 	} else {		/* 802.11g */
8016 		ipw_rt->rt_chbitmask =
8017 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8018 	}
8019 
8020 	/* set the rate in multiples of 500k/s */
8021 	switch (rate) {
8022 	case IPW_TX_RATE_1MB:
8023 		ipw_rt->rt_rate = 2;
8024 		break;
8025 	case IPW_TX_RATE_2MB:
8026 		ipw_rt->rt_rate = 4;
8027 		break;
8028 	case IPW_TX_RATE_5MB:
8029 		ipw_rt->rt_rate = 10;
8030 		break;
8031 	case IPW_TX_RATE_6MB:
8032 		ipw_rt->rt_rate = 12;
8033 		break;
8034 	case IPW_TX_RATE_9MB:
8035 		ipw_rt->rt_rate = 18;
8036 		break;
8037 	case IPW_TX_RATE_11MB:
8038 		ipw_rt->rt_rate = 22;
8039 		break;
8040 	case IPW_TX_RATE_12MB:
8041 		ipw_rt->rt_rate = 24;
8042 		break;
8043 	case IPW_TX_RATE_18MB:
8044 		ipw_rt->rt_rate = 36;
8045 		break;
8046 	case IPW_TX_RATE_24MB:
8047 		ipw_rt->rt_rate = 48;
8048 		break;
8049 	case IPW_TX_RATE_36MB:
8050 		ipw_rt->rt_rate = 72;
8051 		break;
8052 	case IPW_TX_RATE_48MB:
8053 		ipw_rt->rt_rate = 96;
8054 		break;
8055 	case IPW_TX_RATE_54MB:
8056 		ipw_rt->rt_rate = 108;
8057 		break;
8058 	default:
8059 		ipw_rt->rt_rate = 0;
8060 		break;
8061 	}
8062 
8063 	/* antenna number */
8064 	ipw_rt->rt_antenna = (phy_flags & 3);
8065 
8066 	/* set the preamble flag if we have it */
8067 	if (phy_flags & (1 << 6))
8068 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8069 
8070 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8071 
8072 	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8073 		dev->stats.rx_errors++;
8074 		dev_kfree_skb_any(skb);
8075 	}
8076 }
8077 #endif
8078 
8079 static int is_network_packet(struct ipw_priv *priv,
8080 				    struct libipw_hdr_4addr *header)
8081 {
8082 	/* Filter incoming packets to determine if they are targeted toward
8083 	 * this network, discarding packets coming from ourselves */
8084 	switch (priv->ieee->iw_mode) {
8085 	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8086 		/* packets from our adapter are dropped (echo) */
8087 		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8088 			return 0;
8089 
8090 		/* {broad,multi}cast packets to our BSSID go through */
8091 		if (is_multicast_ether_addr(header->addr1))
8092 			return ether_addr_equal(header->addr3, priv->bssid);
8093 
8094 		/* packets to our adapter go through */
8095 		return ether_addr_equal(header->addr1,
8096 					priv->net_dev->dev_addr);
8097 
8098 	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8099 		/* packets from our adapter are dropped (echo) */
8100 		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8101 			return 0;
8102 
8103 		/* {broad,multi}cast packets to our BSS go through */
8104 		if (is_multicast_ether_addr(header->addr1))
8105 			return ether_addr_equal(header->addr2, priv->bssid);
8106 
8107 		/* packets to our adapter go through */
8108 		return ether_addr_equal(header->addr1,
8109 					priv->net_dev->dev_addr);
8110 	}
8111 
8112 	return 1;
8113 }
8114 
8115 #define IPW_PACKET_RETRY_TIME HZ
8116 
8117 static  int is_duplicate_packet(struct ipw_priv *priv,
8118 				      struct libipw_hdr_4addr *header)
8119 {
8120 	u16 sc = le16_to_cpu(header->seq_ctl);
8121 	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8122 	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8123 	u16 *last_seq, *last_frag;
8124 	unsigned long *last_time;
8125 
8126 	switch (priv->ieee->iw_mode) {
8127 	case IW_MODE_ADHOC:
8128 		{
8129 			struct list_head *p;
8130 			struct ipw_ibss_seq *entry = NULL;
8131 			u8 *mac = header->addr2;
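			/* Hash IBSS peers by the last octet of their MAC. */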
8132 			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8133 
8134 			list_for_each(p, &priv->ibss_mac_hash[index]) {
8135 				entry =
8136 				    list_entry(p, struct ipw_ibss_seq, list);
8137 				if (ether_addr_equal(entry->mac, mac))
8138 					break;
8139 			}
8140 			if (p == &priv->ibss_mac_hash[index]) {
8141 				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8142 				if (!entry) {
8143 					IPW_ERROR
8144 					    ("Cannot malloc new mac entry\n");
8145 					return 0;
8146 				}
8147 				memcpy(entry->mac, mac, ETH_ALEN);
8148 				entry->seq_num = seq;
8149 				entry->frag_num = frag;
8150 				entry->packet_time = jiffies;
8151 				list_add(&entry->list,
8152 					 &priv->ibss_mac_hash[index]);
8153 				return 0;
8154 			}
8155 			last_seq = &entry->seq_num;
8156 			last_frag = &entry->frag_num;
8157 			last_time = &entry->packet_time;
8158 			break;
8159 		}
8160 	case IW_MODE_INFRA:
8161 		last_seq = &priv->last_seq_num;
8162 		last_frag = &priv->last_frag_num;
8163 		last_time = &priv->last_packet_time;
8164 		break;
8165 	default:
8166 		return 0;
8167 	}
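	/* Within the retry window, a repeated sequence number with the same
	 * (or an out-of-order) fragment number is treated as a duplicate. */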
8168 	if ((*last_seq == seq) &&
8169 	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8170 		if (*last_frag == frag)
8171 			goto drop;
8172 		if (*last_frag + 1 != frag)
8173 			/* out-of-order fragment */
8174 			goto drop;
8175 	} else
8176 		*last_seq = seq;
8177 
8178 	*last_frag = frag;
8179 	*last_time = jiffies;
8180 	return 0;
8181 
8182       drop:
8183 	/* The BUG_ON() below is commented out because the card has been
8184 	 * observed to receive duplicate packets without the FCTL_RETRY bit
8185 	 * set in IBSS mode with fragmentation enabled.
8186 	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8187 	return 1;
8188 }
8189 
8190 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8191 				   struct ipw_rx_mem_buffer *rxb,
8192 				   struct libipw_rx_stats *stats)
8193 {
8194 	struct sk_buff *skb = rxb->skb;
8195 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8196 	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8197 	    (skb->data + IPW_RX_FRAME_SIZE);
8198 
8199 	libipw_rx_mgt(priv->ieee, header, stats);
8200 
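	/* In IBSS mode, learn peer stations from beacons and probe responses
	 * that carry our BSSID. */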
8201 	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8202 	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8203 	      IEEE80211_STYPE_PROBE_RESP) ||
8204 	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8205 	      IEEE80211_STYPE_BEACON))) {
8206 		if (ether_addr_equal(header->addr3, priv->bssid))
8207 			ipw_add_station(priv, header->addr2);
8208 	}
8209 
8210 	if (priv->config & CFG_NET_STATS) {
8211 		IPW_DEBUG_HC("sending stat packet\n");
8212 
8213 		/* Set the size of the skb to the size of the full
8214 		 * ipw header and 802.11 frame */
8215 		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8216 			IPW_RX_FRAME_SIZE);
8217 
8218 		/* Advance past the ipw packet header to the 802.11 frame */
8219 		skb_pull(skb, IPW_RX_FRAME_SIZE);
8220 
8221 		/* Push the libipw_rx_stats before the 802.11 frame */
8222 		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8223 
8224 		skb->dev = priv->ieee->dev;
8225 
8226 		/* Point raw at the libipw_stats */
8227 		skb_reset_mac_header(skb);
8228 
8229 		skb->pkt_type = PACKET_OTHERHOST;
8230 		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8231 		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8232 		netif_rx(skb);
8233 		rxb->skb = NULL;
8234 	}
8235 }
8236 
8237 /*
8238  * Main entry function for receiving a packet with 802.11 headers.  This
8239  * should be called whenever the FW has notified us that there is a new
8240  * skb in the receive queue.
8241  */
8242 static void ipw_rx(struct ipw_priv *priv)
8243 {
8244 	struct ipw_rx_mem_buffer *rxb;
8245 	struct ipw_rx_packet *pkt;
8246 	struct libipw_hdr_4addr *header;
8247 	u32 r, i;
8248 	u8 network_packet;
8249 	u8 fill_rx = 0;
8250 
8251 	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8252 	ipw_read32(priv, IPW_RX_WRITE_INDEX);
8253 	i = priv->rxq->read;
8254 
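	/* If more than half of the ring is free, replenish Rx buffers as we
	 * drain entries so the firmware never starves. */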
8255 	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8256 		fill_rx = 1;
8257 
8258 	while (i != r) {
8259 		rxb = priv->rxq->queue[i];
8260 		if (unlikely(rxb == NULL)) {
8261 			printk(KERN_CRIT "Queue not allocated!\n");
8262 			break;
8263 		}
8264 		priv->rxq->queue[i] = NULL;
8265 
8266 		dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
8267 					IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
8268 
8269 		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8270 		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8271 			     pkt->header.message_type,
8272 			     pkt->header.rx_seq_num, pkt->header.control_bits);
8273 
8274 		switch (pkt->header.message_type) {
8275 		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8276 				struct libipw_rx_stats stats = {
8277 					.rssi = pkt->u.frame.rssi_dbm -
8278 					    IPW_RSSI_TO_DBM,
8279 					.signal =
8280 					    pkt->u.frame.rssi_dbm -
8281 					    IPW_RSSI_TO_DBM + 0x100,
8282 					.noise =
8283 					    le16_to_cpu(pkt->u.frame.noise),
8284 					.rate = pkt->u.frame.rate,
8285 					.mac_time = jiffies,
8286 					.received_channel =
8287 					    pkt->u.frame.received_channel,
8288 					.freq =
8289 					    (pkt->u.frame.
8290 					     control & (1 << 0)) ?
8291 					    LIBIPW_24GHZ_BAND :
8292 					    LIBIPW_52GHZ_BAND,
8293 					.len = le16_to_cpu(pkt->u.frame.length),
8294 				};
8295 
8296 				if (stats.rssi != 0)
8297 					stats.mask |= LIBIPW_STATMASK_RSSI;
8298 				if (stats.signal != 0)
8299 					stats.mask |= LIBIPW_STATMASK_SIGNAL;
8300 				if (stats.noise != 0)
8301 					stats.mask |= LIBIPW_STATMASK_NOISE;
8302 				if (stats.rate != 0)
8303 					stats.mask |= LIBIPW_STATMASK_RATE;
8304 
8305 				priv->rx_packets++;
8306 
8307 #ifdef CONFIG_IPW2200_PROMISCUOUS
8308 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8309 		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8310 #endif
8311 
8312 #ifdef CONFIG_IPW2200_MONITOR
8313 				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8314 #ifdef CONFIG_IPW2200_RADIOTAP
8315 
8316                 ipw_handle_data_packet_monitor(priv,
8317 					       rxb,
8318 					       &stats);
8319 #else
8320 		ipw_handle_data_packet(priv, rxb,
8321 				       &stats);
8322 #endif
8323 					break;
8324 				}
8325 #endif
8326 
8327 				header =
8328 				    (struct libipw_hdr_4addr *)(rxb->skb->
8329 								   data +
8330 								   IPW_RX_FRAME_SIZE);
8331 				/* TODO: Check Ad-Hoc dest/source and make sure
8332 				 * that we are actually parsing these packets
8333 				 * correctly -- we should probably use the
8334 				 * frame control of the packet and disregard
8335 				 * the current iw_mode */
8336 
8337 				network_packet =
8338 				    is_network_packet(priv, header);
8339 				if (network_packet && priv->assoc_network) {
8340 					priv->assoc_network->stats.rssi =
8341 					    stats.rssi;
8342 					priv->exp_avg_rssi =
8343 					    exponential_average(priv->exp_avg_rssi,
8344 					    stats.rssi, DEPTH_RSSI);
8345 				}
8346 
8347 				IPW_DEBUG_RX("Frame: len=%u\n",
8348 					     le16_to_cpu(pkt->u.frame.length));
8349 
8350 				if (le16_to_cpu(pkt->u.frame.length) <
8351 				    libipw_get_hdrlen(le16_to_cpu(
8352 						    header->frame_ctl))) {
8353 					IPW_DEBUG_DROP
8354 					    ("Received packet is too small. "
8355 					     "Dropping.\n");
8356 					priv->net_dev->stats.rx_errors++;
8357 					priv->wstats.discard.misc++;
8358 					break;
8359 				}
8360 
8361 				switch (WLAN_FC_GET_TYPE
8362 					(le16_to_cpu(header->frame_ctl))) {
8363 
8364 				case IEEE80211_FTYPE_MGMT:
8365 					ipw_handle_mgmt_packet(priv, rxb,
8366 							       &stats);
8367 					break;
8368 
8369 				case IEEE80211_FTYPE_CTL:
8370 					break;
8371 
8372 				case IEEE80211_FTYPE_DATA:
8373 					if (unlikely(!network_packet ||
8374 						     is_duplicate_packet(priv,
8375 									 header)))
8376 					{
8377 						IPW_DEBUG_DROP("Dropping: "
8378 							       "%pM, "
8379 							       "%pM, "
8380 							       "%pM\n",
8381 							       header->addr1,
8382 							       header->addr2,
8383 							       header->addr3);
8384 						break;
8385 					}
8386 
8387 					ipw_handle_data_packet(priv, rxb,
8388 							       &stats);
8389 
8390 					break;
8391 				}
8392 				break;
8393 			}
8394 
8395 		case RX_HOST_NOTIFICATION_TYPE:{
8396 				IPW_DEBUG_RX
8397 				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8398 				     pkt->u.notification.subtype,
8399 				     pkt->u.notification.flags,
8400 				     le16_to_cpu(pkt->u.notification.size));
8401 				ipw_rx_notification(priv, &pkt->u.notification);
8402 				break;
8403 			}
8404 
8405 		default:
8406 			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8407 				     pkt->header.message_type);
8408 			break;
8409 		}
8410 
8411 		/* For now we just don't re-use anything.  We can tweak this
8412 		 * later to try to re-use notification packets and SKBs that
8413 		 * fail to Rx correctly */
8414 		if (rxb->skb != NULL) {
8415 			dev_kfree_skb_any(rxb->skb);
8416 			rxb->skb = NULL;
8417 		}
8418 
8419 		dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
8420 				 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
8421 		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8422 
8423 		i = (i + 1) % RX_QUEUE_SIZE;
8424 
8425 		/* If there are a lot of unused frames, restock the Rx queue
8426 		 * so the ucode won't assert */
8427 		if (fill_rx) {
8428 			priv->rxq->read = i;
8429 			ipw_rx_queue_replenish(priv);
8430 		}
8431 	}
8432 
8433 	/* Backtrack one entry */
8434 	priv->rxq->read = i;
8435 	ipw_rx_queue_restock(priv);
8436 }
8437 
8438 #define DEFAULT_RTS_THRESHOLD     2304U
8439 #define MIN_RTS_THRESHOLD         1U
8440 #define MAX_RTS_THRESHOLD         2304U
8441 #define DEFAULT_BEACON_INTERVAL   100U
8442 #define	DEFAULT_SHORT_RETRY_LIMIT 7U
8443 #define	DEFAULT_LONG_RETRY_LIMIT  4U
8444 
8445 /*
8446  * ipw_sw_reset
8447  * @option: options to control different reset behaviour
8448  * 	    0 = reset everything except the 'disable' module_param
8449  * 	    1 = reset everything and print out driver info (for probe only)
8450  * 	    2 = reset everything
8451  */
8452 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8453 {
8454 	int band, modulation;
8455 	int old_mode = priv->ieee->iw_mode;
8456 
8457 	/* Initialize module parameter values here */
8458 	priv->config = 0;
8459 
8460 	/* We default to disabling the LED code as right now it causes
8461 	 * too many systems to lock up... */
8462 	if (!led_support)
8463 		priv->config |= CFG_NO_LED;
8464 
8465 	if (associate)
8466 		priv->config |= CFG_ASSOCIATE;
8467 	else
8468 		IPW_DEBUG_INFO("Auto associate disabled.\n");
8469 
8470 	if (auto_create)
8471 		priv->config |= CFG_ADHOC_CREATE;
8472 	else
8473 		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8474 
8475 	priv->config &= ~CFG_STATIC_ESSID;
8476 	priv->essid_len = 0;
8477 	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8478 
8479 	if (disable && option) {
8480 		priv->status |= STATUS_RF_KILL_SW;
8481 		IPW_DEBUG_INFO("Radio disabled.\n");
8482 	}
8483 
8484 	if (default_channel != 0) {
8485 		priv->config |= CFG_STATIC_CHANNEL;
8486 		priv->channel = default_channel;
8487 		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8488 		/* TODO: Validate that provided channel is in range */
8489 	}
8490 #ifdef CONFIG_IPW2200_QOS
8491 	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8492 		     burst_duration_CCK, burst_duration_OFDM);
8493 #endif				/* CONFIG_IPW2200_QOS */
8494 
8495 	switch (network_mode) {
8496 	case 1:
8497 		priv->ieee->iw_mode = IW_MODE_ADHOC;
8498 		priv->net_dev->type = ARPHRD_ETHER;
8499 
8500 		break;
8501 #ifdef CONFIG_IPW2200_MONITOR
8502 	case 2:
8503 		priv->ieee->iw_mode = IW_MODE_MONITOR;
8504 #ifdef CONFIG_IPW2200_RADIOTAP
8505 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8506 #else
8507 		priv->net_dev->type = ARPHRD_IEEE80211;
8508 #endif
8509 		break;
8510 #endif
8511 	default:
8512 	case 0:
8513 		priv->net_dev->type = ARPHRD_ETHER;
8514 		priv->ieee->iw_mode = IW_MODE_INFRA;
8515 		break;
8516 	}
8517 
8518 	if (hwcrypto) {
8519 		priv->ieee->host_encrypt = 0;
8520 		priv->ieee->host_encrypt_msdu = 0;
8521 		priv->ieee->host_decrypt = 0;
8522 		priv->ieee->host_mc_decrypt = 0;
8523 	}
8524 	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8525 
8526 	/* IPW2200/2915 is able to do hardware fragmentation. */
8527 	priv->ieee->host_open_frag = 0;
8528 
8529 	if ((priv->pci_dev->device == 0x4223) ||
8530 	    (priv->pci_dev->device == 0x4224)) {
8531 		if (option == 1)
8532 			printk(KERN_INFO DRV_NAME
8533 			       ": Detected Intel PRO/Wireless 2915ABG Network "
8534 			       "Connection\n");
8535 		priv->ieee->abg_true = 1;
8536 		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8537 		modulation = LIBIPW_OFDM_MODULATION |
8538 		    LIBIPW_CCK_MODULATION;
8539 		priv->adapter = IPW_2915ABG;
8540 		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8541 	} else {
8542 		if (option == 1)
8543 			printk(KERN_INFO DRV_NAME
8544 			       ": Detected Intel PRO/Wireless 2200BG Network "
8545 			       "Connection\n");
8546 
8547 		priv->ieee->abg_true = 0;
8548 		band = LIBIPW_24GHZ_BAND;
8549 		modulation = LIBIPW_OFDM_MODULATION |
8550 		    LIBIPW_CCK_MODULATION;
8551 		priv->adapter = IPW_2200BG;
8552 		priv->ieee->mode = IEEE_G | IEEE_B;
8553 	}
8554 
8555 	priv->ieee->freq_band = band;
8556 	priv->ieee->modulation = modulation;
8557 
8558 	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8559 
8560 	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8561 	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8562 
8563 	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8564 	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8565 	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8566 
8567 	/* If power management is turned on, default to AC mode */
8568 	priv->power_mode = IPW_POWER_AC;
8569 	priv->tx_power = IPW_TX_POWER_DEFAULT;
8570 
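	/* Report whether the interface mode was left unchanged (nonzero) or
	 * switched (zero) by this reset. */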
8571 	return old_mode == priv->ieee->iw_mode;
8572 }
8573 
8574 /*
8575  * This file defines the Wireless Extension handlers.  It does not
8576  * define any methods of hardware manipulation and relies on the
8577  * functions defined in ipw_main to provide the HW interaction.
8578  *
8579  * The exception to this is the ipw_get_ordinal() function, which is
8580  * used to poll the hardware instead of making unnecessary calls.
8581  *
8582  */
8583 
8584 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8585 {
8586 	if (channel == 0) {
8587 		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8588 		priv->config &= ~CFG_STATIC_CHANNEL;
8589 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8590 				"parameters.\n");
8591 		ipw_associate(priv);
8592 		return 0;
8593 	}
8594 
8595 	priv->config |= CFG_STATIC_CHANNEL;
8596 
8597 	if (priv->channel == channel) {
8598 		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8599 			       channel);
8600 		return 0;
8601 	}
8602 
8603 	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8604 	priv->channel = channel;
8605 
8606 #ifdef CONFIG_IPW2200_MONITOR
8607 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8608 		int i;
8609 		if (priv->status & STATUS_SCANNING) {
8610 			IPW_DEBUG_SCAN("Scan abort triggered due to "
8611 				       "channel change.\n");
8612 			ipw_abort_scan(priv);
8613 		}
8614 
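		/* Busy-wait up to ~10 ms (1000 * 10 us) for the scan abort to
		 * take effect. */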
8615 		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8616 			udelay(10);
8617 
8618 		if (priv->status & STATUS_SCANNING)
8619 			IPW_DEBUG_SCAN("Still scanning...\n");
8620 		else
8621 			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8622 				       1000 - i);
8623 
8624 		return 0;
8625 	}
8626 #endif				/* CONFIG_IPW2200_MONITOR */
8627 
8628 	/* Network configuration changed -- force [re]association */
8629 	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8630 	if (!ipw_disassociate(priv))
8631 		ipw_associate(priv);
8632 
8633 	return 0;
8634 }
8635 
8636 static int ipw_wx_set_freq(struct net_device *dev,
8637 			   struct iw_request_info *info,
8638 			   union iwreq_data *wrqu, char *extra)
8639 {
8640 	struct ipw_priv *priv = libipw_priv(dev);
8641 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8642 	struct iw_freq *fwrq = &wrqu->freq;
8643 	int ret = 0, i;
8644 	u8 channel, flags;
8645 	int band;
8646 
8647 	if (fwrq->m == 0) {
8648 		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8649 		mutex_lock(&priv->mutex);
8650 		ret = ipw_set_channel(priv, 0);
8651 		mutex_unlock(&priv->mutex);
8652 		return ret;
8653 	}
8654 	/* if setting by freq convert to channel */
8655 	if (fwrq->e == 1) {
8656 		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8657 		if (channel == 0)
8658 			return -EINVAL;
8659 	} else
8660 		channel = fwrq->m;
8661 
8662 	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8663 		return -EINVAL;
8664 
8665 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8666 		i = libipw_channel_to_index(priv->ieee, channel);
8667 		if (i == -1)
8668 			return -EINVAL;
8669 
8670 		flags = (band == LIBIPW_24GHZ_BAND) ?
8671 		    geo->bg[i].flags : geo->a[i].flags;
8672 		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8673 			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8674 			return -EINVAL;
8675 		}
8676 	}
8677 
8678 	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8679 	mutex_lock(&priv->mutex);
8680 	ret = ipw_set_channel(priv, channel);
8681 	mutex_unlock(&priv->mutex);
8682 	return ret;
8683 }
8684 
8685 static int ipw_wx_get_freq(struct net_device *dev,
8686 			   struct iw_request_info *info,
8687 			   union iwreq_data *wrqu, char *extra)
8688 {
8689 	struct ipw_priv *priv = libipw_priv(dev);
8690 
8691 	wrqu->freq.e = 0;
8692 
8693 	/* If we are associated, trying to associate, or have a statically
8694 	 * configured CHANNEL then return that; otherwise return ANY */
8695 	mutex_lock(&priv->mutex);
8696 	if (priv->config & CFG_STATIC_CHANNEL ||
8697 	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8698 		int i;
8699 
8700 		i = libipw_channel_to_index(priv->ieee, priv->channel);
8701 		BUG_ON(i == -1);
8702 		wrqu->freq.e = 1;
8703 
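		/* geo frequencies are in MHz; with e = 1 the value reported
		 * as m * 10^e works out to the frequency in Hz. */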
8704 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8705 		case LIBIPW_52GHZ_BAND:
8706 			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8707 			break;
8708 
8709 		case LIBIPW_24GHZ_BAND:
8710 			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8711 			break;
8712 
8713 		default:
8714 			BUG();
8715 		}
8716 	} else
8717 		wrqu->freq.m = 0;
8718 
8719 	mutex_unlock(&priv->mutex);
8720 	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8721 	return 0;
8722 }
8723 
8724 static int ipw_wx_set_mode(struct net_device *dev,
8725 			   struct iw_request_info *info,
8726 			   union iwreq_data *wrqu, char *extra)
8727 {
8728 	struct ipw_priv *priv = libipw_priv(dev);
8729 	int err = 0;
8730 
8731 	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8732 
8733 	switch (wrqu->mode) {
8734 #ifdef CONFIG_IPW2200_MONITOR
8735 	case IW_MODE_MONITOR:
8736 #endif
8737 	case IW_MODE_ADHOC:
8738 	case IW_MODE_INFRA:
8739 		break;
8740 	case IW_MODE_AUTO:
8741 		wrqu->mode = IW_MODE_INFRA;
8742 		break;
8743 	default:
8744 		return -EINVAL;
8745 	}
8746 	if (wrqu->mode == priv->ieee->iw_mode)
8747 		return 0;
8748 
8749 	mutex_lock(&priv->mutex);
8750 
8751 	ipw_sw_reset(priv, 0);
8752 
8753 #ifdef CONFIG_IPW2200_MONITOR
8754 	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8755 		priv->net_dev->type = ARPHRD_ETHER;
8756 
8757 	if (wrqu->mode == IW_MODE_MONITOR)
8758 #ifdef CONFIG_IPW2200_RADIOTAP
8759 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8760 #else
8761 		priv->net_dev->type = ARPHRD_IEEE80211;
8762 #endif
8763 #endif				/* CONFIG_IPW2200_MONITOR */
8764 
8765 	/* Free the existing firmware and reset the fw_loaded
8766 	 * flag so ipw_load() will bring in the new firmware */
8767 	free_firmware();
8768 
8769 	priv->ieee->iw_mode = wrqu->mode;
8770 
8771 	schedule_work(&priv->adapter_restart);
8772 	mutex_unlock(&priv->mutex);
8773 	return err;
8774 }
8775 
8776 static int ipw_wx_get_mode(struct net_device *dev,
8777 			   struct iw_request_info *info,
8778 			   union iwreq_data *wrqu, char *extra)
8779 {
8780 	struct ipw_priv *priv = libipw_priv(dev);
8781 	mutex_lock(&priv->mutex);
8782 	wrqu->mode = priv->ieee->iw_mode;
8783 	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8784 	mutex_unlock(&priv->mutex);
8785 	return 0;
8786 }
8787 
8788 /* Values are in microseconds */
8789 static const s32 timeout_duration[] = {
8790 	350000,
8791 	250000,
8792 	75000,
8793 	37000,
8794 	25000,
8795 };
8796 
8797 static const s32 period_duration[] = {
8798 	400000,
8799 	700000,
8800 	1000000,
8801 	1000000,
8802 	1000000
8803 };
8804 
8805 static int ipw_wx_get_range(struct net_device *dev,
8806 			    struct iw_request_info *info,
8807 			    union iwreq_data *wrqu, char *extra)
8808 {
8809 	struct ipw_priv *priv = libipw_priv(dev);
8810 	struct iw_range *range = (struct iw_range *)extra;
8811 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8812 	int i = 0, j;
8813 
8814 	wrqu->data.length = sizeof(*range);
8815 	memset(range, 0, sizeof(*range));
8816 
8817 	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8818 	range->throughput = 27 * 1000 * 1000;
8819 
8820 	range->max_qual.qual = 100;
8821 	/* TODO: Find real max RSSI and stick here */
8822 	range->max_qual.level = 0;
8823 	range->max_qual.noise = 0;
8824 	range->max_qual.updated = 7;	/* Updated all three */
8825 
8826 	range->avg_qual.qual = 70;
8827 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8828 	range->avg_qual.level = 0;	/* FIXME to real average level */
8829 	range->avg_qual.noise = 0;
8830 	range->avg_qual.updated = 7;	/* Updated all three */
8831 	mutex_lock(&priv->mutex);
8832 	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8833 
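	/* Supported rates are in 500 kb/s units with the top bit flagging
	 * basic rates; mask the flag and convert to bit/s. */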
8834 	for (i = 0; i < range->num_bitrates; i++)
8835 		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8836 		    500000;
8837 
8838 	range->max_rts = DEFAULT_RTS_THRESHOLD;
8839 	range->min_frag = MIN_FRAG_THRESHOLD;
8840 	range->max_frag = MAX_FRAG_THRESHOLD;
8841 
8842 	range->encoding_size[0] = 5;
8843 	range->encoding_size[1] = 13;
8844 	range->num_encoding_sizes = 2;
8845 	range->max_encoding_tokens = WEP_KEYS;
8846 
8847 	/* Set the Wireless Extension versions */
8848 	range->we_version_compiled = WIRELESS_EXT;
8849 	range->we_version_source = 18;
8850 
8851 	i = 0;
8852 	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8853 		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8854 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8855 			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8856 				continue;
8857 
8858 			range->freq[i].i = geo->bg[j].channel;
8859 			range->freq[i].m = geo->bg[j].freq * 100000;
8860 			range->freq[i].e = 1;
8861 			i++;
8862 		}
8863 	}
8864 
8865 	if (priv->ieee->mode & IEEE_A) {
8866 		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8867 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8868 			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8869 				continue;
8870 
8871 			range->freq[i].i = geo->a[j].channel;
8872 			range->freq[i].m = geo->a[j].freq * 100000;
8873 			range->freq[i].e = 1;
8874 			i++;
8875 		}
8876 	}
8877 
8878 	range->num_channels = i;
8879 	range->num_frequency = i;
8880 
8881 	mutex_unlock(&priv->mutex);
8882 
8883 	/* Event capability (kernel + driver) */
8884 	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8885 				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8886 				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8887 				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8888 	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8889 
8890 	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8891 		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8892 
8893 	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8894 
8895 	IPW_DEBUG_WX("GET Range\n");
8896 	return 0;
8897 }
8898 
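/* SIOCSIWAP handler: a broadcast or all-zero address clears the static
 * BSSID so the driver may associate with any AP; any other address pins
 * the BSSID and forces a [re]association if it differs from the current
 * one. */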
8899 static int ipw_wx_set_wap(struct net_device *dev,
8900 			  struct iw_request_info *info,
8901 			  union iwreq_data *wrqu, char *extra)
8902 {
8903 	struct ipw_priv *priv = libipw_priv(dev);
8904 
8905 	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8906 		return -EINVAL;
8907 	mutex_lock(&priv->mutex);
8908 	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8909 	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8910 		/* we disable mandatory BSSID association */
8911 		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8912 		priv->config &= ~CFG_STATIC_BSSID;
8913 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8914 				"parameters.\n");
8915 		ipw_associate(priv);
8916 		mutex_unlock(&priv->mutex);
8917 		return 0;
8918 	}
8919 
8920 	priv->config |= CFG_STATIC_BSSID;
8921 	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8922 		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8923 		mutex_unlock(&priv->mutex);
8924 		return 0;
8925 	}
8926 
8927 	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8928 		     wrqu->ap_addr.sa_data);
8929 
8930 	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8931 
8932 	/* Network configuration changed -- force [re]association */
8933 	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8934 	if (!ipw_disassociate(priv))
8935 		ipw_associate(priv);
8936 
8937 	mutex_unlock(&priv->mutex);
8938 	return 0;
8939 }
8940 
8941 static int ipw_wx_get_wap(struct net_device *dev,
8942 			  struct iw_request_info *info,
8943 			  union iwreq_data *wrqu, char *extra)
8944 {
8945 	struct ipw_priv *priv = libipw_priv(dev);
8946 
8947 	/* If we are associated, trying to associate, or have a statically
8948 	 * configured BSSID then return that; otherwise return ANY */
8949 	mutex_lock(&priv->mutex);
8950 	if (priv->config & CFG_STATIC_BSSID ||
8951 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8952 		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8953 		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8954 	} else
8955 		eth_zero_addr(wrqu->ap_addr.sa_data);
8956 
8957 	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8958 		     wrqu->ap_addr.sa_data);
8959 	mutex_unlock(&priv->mutex);
8960 	return 0;
8961 }
8962 
8963 static int ipw_wx_set_essid(struct net_device *dev,
8964 			    struct iw_request_info *info,
8965 			    union iwreq_data *wrqu, char *extra)
8966 {
8967 	struct ipw_priv *priv = libipw_priv(dev);
8968 	int length;
8969 
8970 	mutex_lock(&priv->mutex);
8971 
8972 	if (!wrqu->essid.flags)
8973 	{
8974 		IPW_DEBUG_WX("Setting ESSID to ANY\n");
8975 		ipw_disassociate(priv);
8976 		priv->config &= ~CFG_STATIC_ESSID;
8977 		ipw_associate(priv);
8978 		mutex_unlock(&priv->mutex);
8979 		return 0;
8980 	}
8981 
8982 	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8983 
8984 	priv->config |= CFG_STATIC_ESSID;
8985 
8986 	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8987 	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8988 		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8989 		mutex_unlock(&priv->mutex);
8990 		return 0;
8991 	}
8992 
8993 	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
8994 
8995 	priv->essid_len = length;
8996 	memcpy(priv->essid, extra, priv->essid_len);
8997 
8998 	/* Network configuration changed -- force [re]association */
8999 	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9000 	if (!ipw_disassociate(priv))
9001 		ipw_associate(priv);
9002 
9003 	mutex_unlock(&priv->mutex);
9004 	return 0;
9005 }
9006 
9007 static int ipw_wx_get_essid(struct net_device *dev,
9008 			    struct iw_request_info *info,
9009 			    union iwreq_data *wrqu, char *extra)
9010 {
9011 	struct ipw_priv *priv = libipw_priv(dev);
9012 
9013 	/* If we are associated, trying to associate, or have a statically
9014 	 * configured ESSID then return that; otherwise return ANY */
9015 	mutex_lock(&priv->mutex);
9016 	if (priv->config & CFG_STATIC_ESSID ||
9017 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9018 		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9019 			     priv->essid_len, priv->essid);
9020 		memcpy(extra, priv->essid, priv->essid_len);
9021 		wrqu->essid.length = priv->essid_len;
9022 		wrqu->essid.flags = 1;	/* active */
9023 	} else {
9024 		IPW_DEBUG_WX("Getting essid: ANY\n");
9025 		wrqu->essid.length = 0;
9026 		wrqu->essid.flags = 0;	/* any */
9027 	}
9028 	mutex_unlock(&priv->mutex);
9029 	return 0;
9030 }
9031 
9032 static int ipw_wx_set_nick(struct net_device *dev,
9033 			   struct iw_request_info *info,
9034 			   union iwreq_data *wrqu, char *extra)
9035 {
9036 	struct ipw_priv *priv = libipw_priv(dev);
9037 
9038 	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9039 	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9040 		return -E2BIG;
9041 	mutex_lock(&priv->mutex);
9042 	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9043 	memset(priv->nick, 0, sizeof(priv->nick));
9044 	memcpy(priv->nick, extra, wrqu->data.length);
9045 	IPW_DEBUG_TRACE("<<\n");
9046 	mutex_unlock(&priv->mutex);
9047 	return 0;
9048 
9049 }
9050 
9051 static int ipw_wx_get_nick(struct net_device *dev,
9052 			   struct iw_request_info *info,
9053 			   union iwreq_data *wrqu, char *extra)
9054 {
9055 	struct ipw_priv *priv = libipw_priv(dev);
9056 	IPW_DEBUG_WX("Getting nick\n");
9057 	mutex_lock(&priv->mutex);
9058 	wrqu->data.length = strlen(priv->nick);
9059 	memcpy(extra, priv->nick, wrqu->data.length);
9060 	wrqu->data.flags = 1;	/* active */
9061 	mutex_unlock(&priv->mutex);
9062 	return 0;
9063 }
9064 
9065 static int ipw_wx_set_sens(struct net_device *dev,
9066 			    struct iw_request_info *info,
9067 			    union iwreq_data *wrqu, char *extra)
9068 {
9069 	struct ipw_priv *priv = libipw_priv(dev);
9070 	int err = 0;
9071 
9072 	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9073 	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9074 	mutex_lock(&priv->mutex);
9075 
9076 	if (wrqu->sens.fixed == 0)
9077 	{
9078 		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9079 		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9080 		goto out;
9081 	}
9082 	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9083 	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9084 		err = -EINVAL;
9085 		goto out;
9086 	}
9087 
9088 	priv->roaming_threshold = wrqu->sens.value;
9089 	priv->disassociate_threshold = 3 * wrqu->sens.value;
9090       out:
9091 	mutex_unlock(&priv->mutex);
9092 	return err;
9093 }
9094 
9095 static int ipw_wx_get_sens(struct net_device *dev,
9096 			    struct iw_request_info *info,
9097 			    union iwreq_data *wrqu, char *extra)
9098 {
9099 	struct ipw_priv *priv = libipw_priv(dev);
9100 	mutex_lock(&priv->mutex);
9101 	wrqu->sens.fixed = 1;
9102 	wrqu->sens.value = priv->roaming_threshold;
9103 	mutex_unlock(&priv->mutex);
9104 
9105 	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9106 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9107 
9108 	return 0;
9109 }
9110 
9111 static int ipw_wx_set_rate(struct net_device *dev,
9112 			   struct iw_request_info *info,
9113 			   union iwreq_data *wrqu, char *extra)
9114 {
9115 	/* TODO: We should use semaphores or locks for access to priv */
9116 	struct ipw_priv *priv = libipw_priv(dev);
9117 	u32 target_rate = wrqu->bitrate.value;
9118 	u32 fixed, mask;
9119 
9120 	/* value = -1, fixed = 0 means auto, so use all rates offered by the AP */
9121 	/* value = X,  fixed = 1 means only rate X */
9122 	/* value = X,  fixed = 0 means all rates lower than or equal to X */
9123 
9124 	if (target_rate == -1) {
9125 		fixed = 0;
9126 		mask = LIBIPW_DEFAULT_RATES_MASK;
9127 		/* Now we should reassociate */
9128 		goto apply;
9129 	}
9130 
9131 	mask = 0;
9132 	fixed = wrqu->bitrate.fixed;
9133 
9134 	if (target_rate == 1000000 || !fixed)
9135 		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9136 	if (target_rate == 1000000)
9137 		goto apply;
9138 
9139 	if (target_rate == 2000000 || !fixed)
9140 		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9141 	if (target_rate == 2000000)
9142 		goto apply;
9143 
9144 	if (target_rate == 5500000 || !fixed)
9145 		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9146 	if (target_rate == 5500000)
9147 		goto apply;
9148 
9149 	if (target_rate == 6000000 || !fixed)
9150 		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9151 	if (target_rate == 6000000)
9152 		goto apply;
9153 
9154 	if (target_rate == 9000000 || !fixed)
9155 		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9156 	if (target_rate == 9000000)
9157 		goto apply;
9158 
9159 	if (target_rate == 11000000 || !fixed)
9160 		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9161 	if (target_rate == 11000000)
9162 		goto apply;
9163 
9164 	if (target_rate == 12000000 || !fixed)
9165 		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9166 	if (target_rate == 12000000)
9167 		goto apply;
9168 
9169 	if (target_rate == 18000000 || !fixed)
9170 		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9171 	if (target_rate == 18000000)
9172 		goto apply;
9173 
9174 	if (target_rate == 24000000 || !fixed)
9175 		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9176 	if (target_rate == 24000000)
9177 		goto apply;
9178 
9179 	if (target_rate == 36000000 || !fixed)
9180 		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9181 	if (target_rate == 36000000)
9182 		goto apply;
9183 
9184 	if (target_rate == 48000000 || !fixed)
9185 		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9186 	if (target_rate == 48000000)
9187 		goto apply;
9188 
9189 	if (target_rate == 54000000 || !fixed)
9190 		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9191 	if (target_rate == 54000000)
9192 		goto apply;
9193 
9194 	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9195 	return -EINVAL;
9196 
9197       apply:
9198 	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9199 		     mask, fixed ? "fixed" : "sub-rates");
9200 	mutex_lock(&priv->mutex);
9201 	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9202 		priv->config &= ~CFG_FIXED_RATE;
9203 		ipw_set_fixed_rate(priv, priv->ieee->mode);
9204 	} else
9205 		priv->config |= CFG_FIXED_RATE;
9206 
9207 	if (priv->rates_mask == mask) {
9208 		IPW_DEBUG_WX("Mask set to current mask.\n");
9209 		mutex_unlock(&priv->mutex);
9210 		return 0;
9211 	}
9212 
9213 	priv->rates_mask = mask;
9214 
9215 	/* Network configuration changed -- force [re]association */
9216 	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9217 	if (!ipw_disassociate(priv))
9218 		ipw_associate(priv);
9219 
9220 	mutex_unlock(&priv->mutex);
9221 	return 0;
9222 }
9223 
9224 static int ipw_wx_get_rate(struct net_device *dev,
9225 			   struct iw_request_info *info,
9226 			   union iwreq_data *wrqu, char *extra)
9227 {
9228 	struct ipw_priv *priv = libipw_priv(dev);
9229 	mutex_lock(&priv->mutex);
9230 	wrqu->bitrate.value = priv->last_rate;
9231 	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9232 	mutex_unlock(&priv->mutex);
9233 	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9234 	return 0;
9235 }
9236 
9237 static int ipw_wx_set_rts(struct net_device *dev,
9238 			  struct iw_request_info *info,
9239 			  union iwreq_data *wrqu, char *extra)
9240 {
9241 	struct ipw_priv *priv = libipw_priv(dev);
9242 	mutex_lock(&priv->mutex);
9243 	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9244 		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9245 	else {
9246 		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9247 		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9248 			mutex_unlock(&priv->mutex);
9249 			return -EINVAL;
9250 		}
9251 		priv->rts_threshold = wrqu->rts.value;
9252 	}
9253 
9254 	ipw_send_rts_threshold(priv, priv->rts_threshold);
9255 	mutex_unlock(&priv->mutex);
9256 	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9257 	return 0;
9258 }
9259 
9260 static int ipw_wx_get_rts(struct net_device *dev,
9261 			  struct iw_request_info *info,
9262 			  union iwreq_data *wrqu, char *extra)
9263 {
9264 	struct ipw_priv *priv = libipw_priv(dev);
9265 	mutex_lock(&priv->mutex);
9266 	wrqu->rts.value = priv->rts_threshold;
9267 	wrqu->rts.fixed = 0;	/* no auto select */
9268 	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9269 	mutex_unlock(&priv->mutex);
9270 	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9271 	return 0;
9272 }
9273 
9274 static int ipw_wx_set_txpow(struct net_device *dev,
9275 			    struct iw_request_info *info,
9276 			    union iwreq_data *wrqu, char *extra)
9277 {
9278 	struct ipw_priv *priv = libipw_priv(dev);
9279 	int err = 0;
9280 
9281 	mutex_lock(&priv->mutex);
9282 	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9283 		err = -EINPROGRESS;
9284 		goto out;
9285 	}
9286 
9287 	if (!wrqu->power.fixed)
9288 		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9289 
9290 	if (wrqu->power.flags != IW_TXPOW_DBM) {
9291 		err = -EINVAL;
9292 		goto out;
9293 	}
9294 
9295 	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9296 	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9297 		err = -EINVAL;
9298 		goto out;
9299 	}
9300 
9301 	priv->tx_power = wrqu->power.value;
9302 	err = ipw_set_tx_power(priv);
9303       out:
9304 	mutex_unlock(&priv->mutex);
9305 	return err;
9306 }
9307 
9308 static int ipw_wx_get_txpow(struct net_device *dev,
9309 			    struct iw_request_info *info,
9310 			    union iwreq_data *wrqu, char *extra)
9311 {
9312 	struct ipw_priv *priv = libipw_priv(dev);
9313 	mutex_lock(&priv->mutex);
9314 	wrqu->power.value = priv->tx_power;
9315 	wrqu->power.fixed = 1;
9316 	wrqu->power.flags = IW_TXPOW_DBM;
9317 	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9318 	mutex_unlock(&priv->mutex);
9319 
9320 	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9321 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9322 
9323 	return 0;
9324 }
9325 
9326 static int ipw_wx_set_frag(struct net_device *dev,
9327 			   struct iw_request_info *info,
9328 			   union iwreq_data *wrqu, char *extra)
9329 {
9330 	struct ipw_priv *priv = libipw_priv(dev);
9331 	mutex_lock(&priv->mutex);
9332 	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9333 		priv->ieee->fts = DEFAULT_FTS;
9334 	else {
9335 		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9336 		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9337 			mutex_unlock(&priv->mutex);
9338 			return -EINVAL;
9339 		}
9340 
9341 		priv->ieee->fts = wrqu->frag.value & ~0x1;
9342 	}
9343 
9344 	ipw_send_frag_threshold(priv, wrqu->frag.value);
9345 	mutex_unlock(&priv->mutex);
9346 	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9347 	return 0;
9348 }
9349 
9350 static int ipw_wx_get_frag(struct net_device *dev,
9351 			   struct iw_request_info *info,
9352 			   union iwreq_data *wrqu, char *extra)
9353 {
9354 	struct ipw_priv *priv = libipw_priv(dev);
9355 	mutex_lock(&priv->mutex);
9356 	wrqu->frag.value = priv->ieee->fts;
9357 	wrqu->frag.fixed = 0;	/* no auto select */
9358 	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9359 	mutex_unlock(&priv->mutex);
9360 	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9361 
9362 	return 0;
9363 }
9364 
9365 static int ipw_wx_set_retry(struct net_device *dev,
9366 			    struct iw_request_info *info,
9367 			    union iwreq_data *wrqu, char *extra)
9368 {
9369 	struct ipw_priv *priv = libipw_priv(dev);
9370 
9371 	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9372 		return -EINVAL;
9373 
9374 	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9375 		return 0;
9376 
9377 	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9378 		return -EINVAL;
9379 
9380 	mutex_lock(&priv->mutex);
9381 	if (wrqu->retry.flags & IW_RETRY_SHORT)
9382 		priv->short_retry_limit = (u8) wrqu->retry.value;
9383 	else if (wrqu->retry.flags & IW_RETRY_LONG)
9384 		priv->long_retry_limit = (u8) wrqu->retry.value;
9385 	else {
9386 		priv->short_retry_limit = (u8) wrqu->retry.value;
9387 		priv->long_retry_limit = (u8) wrqu->retry.value;
9388 	}
9389 
9390 	ipw_send_retry_limit(priv, priv->short_retry_limit,
9391 			     priv->long_retry_limit);
9392 	mutex_unlock(&priv->mutex);
9393 	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9394 		     priv->short_retry_limit, priv->long_retry_limit);
9395 	return 0;
9396 }
9397 
9398 static int ipw_wx_get_retry(struct net_device *dev,
9399 			    struct iw_request_info *info,
9400 			    union iwreq_data *wrqu, char *extra)
9401 {
9402 	struct ipw_priv *priv = libipw_priv(dev);
9403 
9404 	mutex_lock(&priv->mutex);
9405 	wrqu->retry.disabled = 0;
9406 
9407 	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9408 		mutex_unlock(&priv->mutex);
9409 		return -EINVAL;
9410 	}
9411 
9412 	if (wrqu->retry.flags & IW_RETRY_LONG) {
9413 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9414 		wrqu->retry.value = priv->long_retry_limit;
9415 	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9416 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9417 		wrqu->retry.value = priv->short_retry_limit;
9418 	} else {
9419 		wrqu->retry.flags = IW_RETRY_LIMIT;
9420 		wrqu->retry.value = priv->short_retry_limit;
9421 	}
9422 	mutex_unlock(&priv->mutex);
9423 
9424 	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9425 
9426 	return 0;
9427 }
9428 
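/* SIOCSIWSCAN handler: pick the matching deferred scan work item --
 * a directed scan when an ESSID is supplied, a passive scan when
 * requested, otherwise a normal active broadcast scan -- and schedule it. */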
9429 static int ipw_wx_set_scan(struct net_device *dev,
9430 			   struct iw_request_info *info,
9431 			   union iwreq_data *wrqu, char *extra)
9432 {
9433 	struct ipw_priv *priv = libipw_priv(dev);
9434 	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9435 	struct delayed_work *work = NULL;
9436 
9437 	mutex_lock(&priv->mutex);
9438 
9439 	priv->user_requested_scan = 1;
9440 
9441 	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9442 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9443 			int len = min((int)req->essid_len,
9444 			              (int)sizeof(priv->direct_scan_ssid));
9445 			memcpy(priv->direct_scan_ssid, req->essid, len);
9446 			priv->direct_scan_ssid_len = len;
9447 			work = &priv->request_direct_scan;
9448 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9449 			work = &priv->request_passive_scan;
9450 		}
9451 	} else {
9452 		/* Normal active broadcast scan */
9453 		work = &priv->request_scan;
9454 	}
9455 
9456 	mutex_unlock(&priv->mutex);
9457 
9458 	IPW_DEBUG_WX("Start scan\n");
9459 
9460 	schedule_delayed_work(work, 0);
9461 
9462 	return 0;
9463 }
9464 
9465 static int ipw_wx_get_scan(struct net_device *dev,
9466 			   struct iw_request_info *info,
9467 			   union iwreq_data *wrqu, char *extra)
9468 {
9469 	struct ipw_priv *priv = libipw_priv(dev);
9470 	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9471 }
9472 
9473 static int ipw_wx_set_encode(struct net_device *dev,
9474 			     struct iw_request_info *info,
9475 			     union iwreq_data *wrqu, char *key)
9476 {
9477 	struct ipw_priv *priv = libipw_priv(dev);
9478 	int ret;
9479 	u32 cap = priv->capability;
9480 
9481 	mutex_lock(&priv->mutex);
9482 	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9483 
9484 	/* In IBSS mode, we need to notify the firmware to update
9485 	 * the beacon info after we changed the capability. */
9486 	if (cap != priv->capability &&
9487 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9488 	    priv->status & STATUS_ASSOCIATED)
9489 		ipw_disassociate(priv);
9490 
9491 	mutex_unlock(&priv->mutex);
9492 	return ret;
9493 }
9494 
9495 static int ipw_wx_get_encode(struct net_device *dev,
9496 			     struct iw_request_info *info,
9497 			     union iwreq_data *wrqu, char *key)
9498 {
9499 	struct ipw_priv *priv = libipw_priv(dev);
9500 	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9501 }
9502 
9503 static int ipw_wx_set_power(struct net_device *dev,
9504 			    struct iw_request_info *info,
9505 			    union iwreq_data *wrqu, char *extra)
9506 {
9507 	struct ipw_priv *priv = libipw_priv(dev);
9508 	int err;
9509 	mutex_lock(&priv->mutex);
9510 	if (wrqu->power.disabled) {
9511 		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9512 		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9513 		if (err) {
9514 			IPW_DEBUG_WX("failed setting power mode.\n");
9515 			mutex_unlock(&priv->mutex);
9516 			return err;
9517 		}
9518 		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9519 		mutex_unlock(&priv->mutex);
9520 		return 0;
9521 	}
9522 
9523 	switch (wrqu->power.flags & IW_POWER_MODE) {
9524 	case IW_POWER_ON:	/* If not specified */
9525 	case IW_POWER_MODE:	/* If set all mask */
9526 	case IW_POWER_ALL_R:	/* If explicitly state all */
9527 		break;
9528 	default:		/* Otherwise we don't support it */
9529 		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9530 			     wrqu->power.flags);
9531 		mutex_unlock(&priv->mutex);
9532 		return -EOPNOTSUPP;
9533 	}
9534 
9535 	/* If the user hasn't specified a power management mode yet, default
9536 	 * to BATTERY */
9537 	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9538 		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9539 	else
9540 		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9541 
9542 	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9543 	if (err) {
9544 		IPW_DEBUG_WX("failed setting power mode.\n");
9545 		mutex_unlock(&priv->mutex);
9546 		return err;
9547 	}
9548 
9549 	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9550 	mutex_unlock(&priv->mutex);
9551 	return 0;
9552 }
9553 
9554 static int ipw_wx_get_power(struct net_device *dev,
9555 			    struct iw_request_info *info,
9556 			    union iwreq_data *wrqu, char *extra)
9557 {
9558 	struct ipw_priv *priv = libipw_priv(dev);
9559 	mutex_lock(&priv->mutex);
9560 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9561 		wrqu->power.disabled = 1;
9562 	else
9563 		wrqu->power.disabled = 0;
9564 
9565 	mutex_unlock(&priv->mutex);
9566 	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9567 
9568 	return 0;
9569 }
9570 
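/* Private ioctl: set the firmware power-save level.  Values outside
 * 1..IPW_POWER_LIMIT fall back to IPW_POWER_AC (no power saving). */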
9571 static int ipw_wx_set_powermode(struct net_device *dev,
9572 				struct iw_request_info *info,
9573 				union iwreq_data *wrqu, char *extra)
9574 {
9575 	struct ipw_priv *priv = libipw_priv(dev);
9576 	int mode = *(int *)extra;
9577 	int err;
9578 
9579 	mutex_lock(&priv->mutex);
9580 	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9581 		mode = IPW_POWER_AC;
9582 
9583 	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9584 		err = ipw_send_power_mode(priv, mode);
9585 		if (err) {
9586 			IPW_DEBUG_WX("failed setting power mode.\n");
9587 			mutex_unlock(&priv->mutex);
9588 			return err;
9589 		}
9590 		priv->power_mode = IPW_POWER_ENABLED | mode;
9591 	}
9592 	mutex_unlock(&priv->mutex);
9593 	return 0;
9594 }
9595 
9596 #define MAX_WX_STRING 80
9597 static int ipw_wx_get_powermode(struct net_device *dev,
9598 				struct iw_request_info *info,
9599 				union iwreq_data *wrqu, char *extra)
9600 {
9601 	struct ipw_priv *priv = libipw_priv(dev);
9602 	int level = IPW_POWER_LEVEL(priv->power_mode);
9603 	char *p = extra;
9604 
9605 	p += scnprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9606 
9607 	switch (level) {
9608 	case IPW_POWER_AC:
9609 		p += scnprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9610 		break;
9611 	case IPW_POWER_BATTERY:
9612 		p += scnprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9613 		break;
9614 	default:
9615 		p += scnprintf(p, MAX_WX_STRING - (p - extra),
9616 			      "(Timeout %dms, Period %dms)",
9617 			      timeout_duration[level - 1] / 1000,
9618 			      period_duration[level - 1] / 1000);
9619 	}
9620 
9621 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9622 		p += scnprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9623 
9624 	wrqu->data.length = p - extra + 1;
9625 
9626 	return 0;
9627 }
9628 
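/* Private ioctl: select any combination of 802.11a/b/g.  802.11a is
 * rejected on 2200BG hardware; otherwise the band, modulation and
 * supported rates are updated and a [re]association is forced. */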
9629 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9630 				    struct iw_request_info *info,
9631 				    union iwreq_data *wrqu, char *extra)
9632 {
9633 	struct ipw_priv *priv = libipw_priv(dev);
9634 	int mode = *(int *)extra;
9635 	u8 band = 0, modulation = 0;
9636 
9637 	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9638 		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9639 		return -EINVAL;
9640 	}
9641 	mutex_lock(&priv->mutex);
9642 	if (priv->adapter == IPW_2915ABG) {
9643 		priv->ieee->abg_true = 1;
9644 		if (mode & IEEE_A) {
9645 			band |= LIBIPW_52GHZ_BAND;
9646 			modulation |= LIBIPW_OFDM_MODULATION;
9647 		} else
9648 			priv->ieee->abg_true = 0;
9649 	} else {
9650 		if (mode & IEEE_A) {
9651 			IPW_WARNING("Attempt to set 2200BG into "
9652 				    "802.11a mode\n");
9653 			mutex_unlock(&priv->mutex);
9654 			return -EINVAL;
9655 		}
9656 
9657 		priv->ieee->abg_true = 0;
9658 	}
9659 
9660 	if (mode & IEEE_B) {
9661 		band |= LIBIPW_24GHZ_BAND;
9662 		modulation |= LIBIPW_CCK_MODULATION;
9663 	} else
9664 		priv->ieee->abg_true = 0;
9665 
9666 	if (mode & IEEE_G) {
9667 		band |= LIBIPW_24GHZ_BAND;
9668 		modulation |= LIBIPW_OFDM_MODULATION;
9669 	} else
9670 		priv->ieee->abg_true = 0;
9671 
9672 	priv->ieee->mode = mode;
9673 	priv->ieee->freq_band = band;
9674 	priv->ieee->modulation = modulation;
9675 	init_supported_rates(priv, &priv->rates);
9676 
9677 	/* Network configuration changed -- force [re]association */
9678 	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9679 	if (!ipw_disassociate(priv)) {
9680 		ipw_send_supported_rates(priv, &priv->rates);
9681 		ipw_associate(priv);
9682 	}
9683 
9684 	/* Update the band LEDs */
9685 	ipw_led_band_on(priv);
9686 
9687 	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9688 		     mode & IEEE_A ? 'a' : '.',
9689 		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9690 	mutex_unlock(&priv->mutex);
9691 	return 0;
9692 }
9693 
9694 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9695 				    struct iw_request_info *info,
9696 				    union iwreq_data *wrqu, char *extra)
9697 {
9698 	struct ipw_priv *priv = libipw_priv(dev);
9699 	mutex_lock(&priv->mutex);
9700 	switch (priv->ieee->mode) {
9701 	case IEEE_A:
9702 		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9703 		break;
9704 	case IEEE_B:
9705 		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9706 		break;
9707 	case IEEE_A | IEEE_B:
9708 		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9709 		break;
9710 	case IEEE_G:
9711 		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9712 		break;
9713 	case IEEE_A | IEEE_G:
9714 		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9715 		break;
9716 	case IEEE_B | IEEE_G:
9717 		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9718 		break;
9719 	case IEEE_A | IEEE_B | IEEE_G:
9720 		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9721 		break;
9722 	default:
9723 		strncpy(extra, "unknown", MAX_WX_STRING);
9724 		break;
9725 	}
9726 	extra[MAX_WX_STRING - 1] = '\0';
9727 
9728 	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9729 
9730 	wrqu->data.length = strlen(extra) + 1;
9731 	mutex_unlock(&priv->mutex);
9732 
9733 	return 0;
9734 }
9735 
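/* Private ioctl: 1 forces a long preamble (triggering a [re]association
 * if it was not already set), 0 restores automatic preamble selection;
 * anything else is rejected. */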
9736 static int ipw_wx_set_preamble(struct net_device *dev,
9737 			       struct iw_request_info *info,
9738 			       union iwreq_data *wrqu, char *extra)
9739 {
9740 	struct ipw_priv *priv = libipw_priv(dev);
9741 	int mode = *(int *)extra;
9742 	mutex_lock(&priv->mutex);
9743 	/* Switching from SHORT -> LONG requires a disassociation */
9744 	if (mode == 1) {
9745 		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9746 			priv->config |= CFG_PREAMBLE_LONG;
9747 
9748 			/* Network configuration changed -- force [re]association */
9749 			IPW_DEBUG_ASSOC
9750 			    ("[re]association triggered due to preamble change.\n");
9751 			if (!ipw_disassociate(priv))
9752 				ipw_associate(priv);
9753 		}
9754 		goto done;
9755 	}
9756 
9757 	if (mode == 0) {
9758 		priv->config &= ~CFG_PREAMBLE_LONG;
9759 		goto done;
9760 	}
9761 	mutex_unlock(&priv->mutex);
9762 	return -EINVAL;
9763 
9764       done:
9765 	mutex_unlock(&priv->mutex);
9766 	return 0;
9767 }
9768 
9769 static int ipw_wx_get_preamble(struct net_device *dev,
9770 			       struct iw_request_info *info,
9771 			       union iwreq_data *wrqu, char *extra)
9772 {
9773 	struct ipw_priv *priv = libipw_priv(dev);
9774 	mutex_lock(&priv->mutex);
9775 	if (priv->config & CFG_PREAMBLE_LONG)
9776 		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9777 	else
9778 		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9779 	mutex_unlock(&priv->mutex);
9780 	return 0;
9781 }
9782 
9783 #ifdef CONFIG_IPW2200_MONITOR
9784 static int ipw_wx_set_monitor(struct net_device *dev,
9785 			      struct iw_request_info *info,
9786 			      union iwreq_data *wrqu, char *extra)
9787 {
9788 	struct ipw_priv *priv = libipw_priv(dev);
9789 	int *parms = (int *)extra;
9790 	int enable = (parms[0] > 0);
9791 	mutex_lock(&priv->mutex);
9792 	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9793 	if (enable) {
9794 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9795 #ifdef CONFIG_IPW2200_RADIOTAP
9796 			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9797 #else
9798 			priv->net_dev->type = ARPHRD_IEEE80211;
9799 #endif
9800 			schedule_work(&priv->adapter_restart);
9801 		}
9802 
9803 		ipw_set_channel(priv, parms[1]);
9804 	} else {
9805 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9806 			mutex_unlock(&priv->mutex);
9807 			return 0;
9808 		}
9809 		priv->net_dev->type = ARPHRD_ETHER;
9810 		schedule_work(&priv->adapter_restart);
9811 	}
9812 	mutex_unlock(&priv->mutex);
9813 	return 0;
9814 }
9815 
9816 #endif				/* CONFIG_IPW2200_MONITOR */
9817 
9818 static int ipw_wx_reset(struct net_device *dev,
9819 			struct iw_request_info *info,
9820 			union iwreq_data *wrqu, char *extra)
9821 {
9822 	struct ipw_priv *priv = libipw_priv(dev);
9823 	IPW_DEBUG_WX("RESET\n");
9824 	schedule_work(&priv->adapter_restart);
9825 	return 0;
9826 }
9827 
9828 static int ipw_wx_sw_reset(struct net_device *dev,
9829 			   struct iw_request_info *info,
9830 			   union iwreq_data *wrqu, char *extra)
9831 {
9832 	struct ipw_priv *priv = libipw_priv(dev);
9833 	union iwreq_data wrqu_sec = {
9834 		.encoding = {
9835 			     .flags = IW_ENCODE_DISABLED,
9836 			     },
9837 	};
9838 	int ret;
9839 
9840 	IPW_DEBUG_WX("SW_RESET\n");
9841 
9842 	mutex_lock(&priv->mutex);
9843 
9844 	ret = ipw_sw_reset(priv, 2);
9845 	if (!ret) {
9846 		free_firmware();
9847 		ipw_adapter_restart(priv);
9848 	}
9849 
9850 	/* The SW reset bit might have been toggled on by the 'disable'
9851 	 * module parameter, so take appropriate action */
9852 	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9853 
9854 	mutex_unlock(&priv->mutex);
9855 	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9856 	mutex_lock(&priv->mutex);
9857 
9858 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9859 		/* Configuration likely changed -- force [re]association */
9860 		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9861 				"reset.\n");
9862 		if (!ipw_disassociate(priv))
9863 			ipw_associate(priv);
9864 	}
9865 
9866 	mutex_unlock(&priv->mutex);
9867 
9868 	return 0;
9869 }
9870 
9871 /* Rebase the WE IOCTLs to zero for the handler array */
9872 static iw_handler ipw_wx_handlers[] = {
9873 	IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9874 	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9875 	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9876 	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9877 	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9878 	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9879 	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9880 	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9881 	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9882 	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9883 	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9884 	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
9885 	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
9886 	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
9887 	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
9888 	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
9889 	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
9890 	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
9891 	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
9892 	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
9893 	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
9894 	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
9895 	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
9896 	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
9897 	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
9898 	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
9899 	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
9900 	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
9901 	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
9902 	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
9903 	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
9904 	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
9905 	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
9906 	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
9907 	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
9908 	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
9909 	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
9910 	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
9911 	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
9912 	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
9913 	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
9914 };
9915 
9916 enum {
9917 	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9918 	IPW_PRIV_GET_POWER,
9919 	IPW_PRIV_SET_MODE,
9920 	IPW_PRIV_GET_MODE,
9921 	IPW_PRIV_SET_PREAMBLE,
9922 	IPW_PRIV_GET_PREAMBLE,
9923 	IPW_PRIV_RESET,
9924 	IPW_PRIV_SW_RESET,
9925 #ifdef CONFIG_IPW2200_MONITOR
9926 	IPW_PRIV_SET_MONITOR,
9927 #endif
9928 };
9929 
9930 static struct iw_priv_args ipw_priv_args[] = {
9931 	{
9932 	 .cmd = IPW_PRIV_SET_POWER,
9933 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9934 	 .name = "set_power"},
9935 	{
9936 	 .cmd = IPW_PRIV_GET_POWER,
9937 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9938 	 .name = "get_power"},
9939 	{
9940 	 .cmd = IPW_PRIV_SET_MODE,
9941 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9942 	 .name = "set_mode"},
9943 	{
9944 	 .cmd = IPW_PRIV_GET_MODE,
9945 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9946 	 .name = "get_mode"},
9947 	{
9948 	 .cmd = IPW_PRIV_SET_PREAMBLE,
9949 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9950 	 .name = "set_preamble"},
9951 	{
9952 	 .cmd = IPW_PRIV_GET_PREAMBLE,
9953 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9954 	 .name = "get_preamble"},
9955 	{
9956 	 IPW_PRIV_RESET,
9957 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9958 	{
9959 	 IPW_PRIV_SW_RESET,
9960 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9961 #ifdef CONFIG_IPW2200_MONITOR
9962 	{
9963 	 IPW_PRIV_SET_MONITOR,
9964 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9965 #endif				/* CONFIG_IPW2200_MONITOR */
9966 };
9967 
9968 static iw_handler ipw_priv_handler[] = {
9969 	ipw_wx_set_powermode,
9970 	ipw_wx_get_powermode,
9971 	ipw_wx_set_wireless_mode,
9972 	ipw_wx_get_wireless_mode,
9973 	ipw_wx_set_preamble,
9974 	ipw_wx_get_preamble,
9975 	ipw_wx_reset,
9976 	ipw_wx_sw_reset,
9977 #ifdef CONFIG_IPW2200_MONITOR
9978 	ipw_wx_set_monitor,
9979 #endif
9980 };
9981 
9982 static const struct iw_handler_def ipw_wx_handler_def = {
9983 	.standard = ipw_wx_handlers,
9984 	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
9985 	.num_private = ARRAY_SIZE(ipw_priv_handler),
9986 	.num_private_args = ARRAY_SIZE(ipw_priv_args),
9987 	.private = ipw_priv_handler,
9988 	.private_args = ipw_priv_args,
9989 	.get_wireless_stats = ipw_get_wireless_stats,
9990 };
9991 
9992 /*
9993  * Get wireless statistics.
9994  * Called by /proc/net/wireless
9995  * Also called by SIOCGIWSTATS
9996  */
9997 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9998 {
9999 	struct ipw_priv *priv = libipw_priv(dev);
10000 	struct iw_statistics *wstats;
10001 
10002 	wstats = &priv->wstats;
10003 
10004 	/* If the hw is disabled, ipw_get_ordinal() can't be called.
10005 	 * netdev->get_wireless_stats seems to be called before the fw is
10006 	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10007 	 * and associated; if not associated, the values are all meaningless
10008 	 * anyway, so zero them and mark them INVALID */
10009 	if (!(priv->status & STATUS_ASSOCIATED)) {
10010 		wstats->miss.beacon = 0;
10011 		wstats->discard.retries = 0;
10012 		wstats->qual.qual = 0;
10013 		wstats->qual.level = 0;
10014 		wstats->qual.noise = 0;
10015 		wstats->qual.updated = 7;
10016 		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10017 		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10018 		return wstats;
10019 	}
10020 
10021 	wstats->qual.qual = priv->quality;
10022 	wstats->qual.level = priv->exp_avg_rssi;
10023 	wstats->qual.noise = priv->exp_avg_noise;
10024 	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10025 	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10026 
10027 	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10028 	wstats->discard.retries = priv->last_tx_failures;
10029 	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10030 
10031 /*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10032 	goto fail_get_ordinal;
10033 	wstats->discard.retries += tx_retry; */
10034 
10035 	return wstats;
10036 }
10037 
10038 /* net device stuff */
10039 
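/* Fill the firmware system configuration block with the driver
 * defaults, clamping the 'antenna' module parameter to a valid
 * diversity setting. */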
10040 static void init_sys_config(struct ipw_sys_config *sys_config)
10041 {
10042 	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10043 	sys_config->bt_coexistence = 0;
10044 	sys_config->answer_broadcast_ssid_probe = 0;
10045 	sys_config->accept_all_data_frames = 0;
10046 	sys_config->accept_non_directed_frames = 1;
10047 	sys_config->exclude_unicast_unencrypted = 0;
10048 	sys_config->disable_unicast_decryption = 1;
10049 	sys_config->exclude_multicast_unencrypted = 0;
10050 	sys_config->disable_multicast_decryption = 1;
10051 	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10052 		antenna = CFG_SYS_ANTENNA_BOTH;
10053 	sys_config->antenna_diversity = antenna;
10054 	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10055 	sys_config->dot11g_auto_detection = 0;
10056 	sys_config->enable_cts_to_self = 0;
10057 	sys_config->bt_coexist_collision_thr = 0;
10058 	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10059 	sys_config->silence_threshold = 0x1e;
10060 }
10061 
10062 static int ipw_net_open(struct net_device *dev)
10063 {
10064 	IPW_DEBUG_INFO("dev->open\n");
10065 	netif_start_queue(dev);
10066 	return 0;
10067 }
10068 
10069 static int ipw_net_stop(struct net_device *dev)
10070 {
10071 	IPW_DEBUG_INFO("dev->close\n");
10072 	netif_stop_queue(dev);
10073 	return 0;
10074 }
10075 
10076 /*
10077  * TODO:
10078  *
10079  * Modify to send one TFD per fragment instead of using chunking;
10080  * otherwise we need to heavily modify libipw_skb_to_txb().
10081  */
10082 
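/* Queue a libipw txb for transmission: pick the TX queue (per-priority
 * when QoS is enabled), resolve the destination station in ad-hoc mode,
 * fill in a TFD with modulation, preamble and security flags, DMA-map
 * each fragment as a chunk, and coalesce any fragments beyond
 * NUM_TFD_CHUNKS - 2 into a single reallocated skb before advancing the
 * queue write pointer. */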
10083 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10084 			     int pri)
10085 {
10086 	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10087 	    txb->fragments[0]->data;
10088 	int i = 0;
10089 	struct tfd_frame *tfd;
10090 #ifdef CONFIG_IPW2200_QOS
10091 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10092 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10093 #else
10094 	struct clx2_tx_queue *txq = &priv->txq[0];
10095 #endif
10096 	struct clx2_queue *q = &txq->q;
10097 	u8 id, hdr_len, unicast;
10098 	int fc;
10099 
10100 	if (!(priv->status & STATUS_ASSOCIATED))
10101 		goto drop;
10102 
10103 	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10104 	switch (priv->ieee->iw_mode) {
10105 	case IW_MODE_ADHOC:
10106 		unicast = !is_multicast_ether_addr(hdr->addr1);
10107 		id = ipw_find_station(priv, hdr->addr1);
10108 		if (id == IPW_INVALID_STATION) {
10109 			id = ipw_add_station(priv, hdr->addr1);
10110 			if (id == IPW_INVALID_STATION) {
10111 				IPW_WARNING("Attempt to send data to "
10112 					    "invalid cell: %pM\n",
10113 					    hdr->addr1);
10114 				goto drop;
10115 			}
10116 		}
10117 		break;
10118 
10119 	case IW_MODE_INFRA:
10120 	default:
10121 		unicast = !is_multicast_ether_addr(hdr->addr3);
10122 		id = 0;
10123 		break;
10124 	}
10125 
10126 	tfd = &txq->bd[q->first_empty];
10127 	txq->txb[q->first_empty] = txb;
10128 	memset(tfd, 0, sizeof(*tfd));
10129 	tfd->u.data.station_number = id;
10130 
10131 	tfd->control_flags.message_type = TX_FRAME_TYPE;
10132 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10133 
10134 	tfd->u.data.cmd_id = DINO_CMD_TX;
10135 	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10136 
10137 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10138 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10139 	else
10140 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10141 
10142 	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10143 		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10144 
10145 	fc = le16_to_cpu(hdr->frame_ctl);
10146 	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10147 
10148 	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10149 
10150 	if (likely(unicast))
10151 		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10152 
10153 	if (txb->encrypted && !priv->ieee->host_encrypt) {
10154 		switch (priv->ieee->sec.level) {
10155 		case SEC_LEVEL_3:
10156 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10157 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10158 			/* XXX: The ACK flag must be set for CCMP even for
10159 			 * multicast/broadcast packets, because CCMP group
10160 			 * traffic encrypted with the GTK is actually
10161 			 * handled by the AP. */
10162 			if (!unicast)
10163 				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10164 
10165 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10166 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10167 			tfd->u.data.key_index = 0;
10168 			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10169 			break;
10170 		case SEC_LEVEL_2:
10171 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10172 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10173 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10174 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10175 			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10176 			break;
10177 		case SEC_LEVEL_1:
10178 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10179 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10180 			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10181 			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10182 			    40)
10183 				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10184 			else
10185 				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10186 			break;
10187 		case SEC_LEVEL_0:
10188 			break;
10189 		default:
10190 			printk(KERN_ERR "Unknown security level %d\n",
10191 			       priv->ieee->sec.level);
10192 			break;
10193 		}
10194 	} else
10195 		/* No hardware encryption */
10196 		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10197 
10198 #ifdef CONFIG_IPW2200_QOS
10199 	if (fc & IEEE80211_STYPE_QOS_DATA)
10200 		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10201 #endif				/* CONFIG_IPW2200_QOS */
10202 
10203 	/* payload */
10204 	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10205 						 txb->nr_frags));
10206 	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10207 		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10208 	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10209 		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10210 			       i, le32_to_cpu(tfd->u.data.num_chunks),
10211 			       txb->fragments[i]->len - hdr_len);
10212 		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10213 			     i, tfd->u.data.num_chunks,
10214 			     txb->fragments[i]->len - hdr_len);
10215 		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10216 			   txb->fragments[i]->len - hdr_len);
10217 
10218 		tfd->u.data.chunk_ptr[i] =
10219 		    cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
10220 					       txb->fragments[i]->data + hdr_len,
10221 					       txb->fragments[i]->len - hdr_len,
10222 					       DMA_TO_DEVICE));
10223 		tfd->u.data.chunk_len[i] =
10224 		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10225 	}
10226 
10227 	if (i != txb->nr_frags) {
10228 		struct sk_buff *skb;
10229 		u16 remaining_bytes = 0;
10230 		int j;
10231 
10232 		for (j = i; j < txb->nr_frags; j++)
10233 			remaining_bytes += txb->fragments[j]->len - hdr_len;
10234 
10235 		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10236 		       remaining_bytes);
10237 		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10238 		if (skb != NULL) {
10239 			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10240 			for (j = i; j < txb->nr_frags; j++) {
10241 				int size = txb->fragments[j]->len - hdr_len;
10242 
10243 				printk(KERN_INFO "Adding frag %d %d...\n",
10244 				       j, size);
10245 				skb_put_data(skb,
10246 					     txb->fragments[j]->data + hdr_len,
10247 					     size);
10248 			}
10249 			dev_kfree_skb_any(txb->fragments[i]);
10250 			txb->fragments[i] = skb;
10251 			tfd->u.data.chunk_ptr[i] =
10252 			    cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
10253 						       skb->data,
10254 						       remaining_bytes,
10255 						       DMA_TO_DEVICE));
10256 
10257 			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10258 		}
10259 	}
10260 
10261 	/* kick DMA */
10262 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10263 	ipw_write32(priv, q->reg_w, q->first_empty);
10264 
10265 	if (ipw_tx_queue_space(q) < q->high_mark)
10266 		netif_stop_queue(priv->net_dev);
10267 
10268 	return NETDEV_TX_OK;
10269 
10270       drop:
10271 	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10272 	libipw_txb_free(txb);
10273 	return NETDEV_TX_OK;
10274 }
10275 
10276 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10277 {
10278 	struct ipw_priv *priv = libipw_priv(dev);
10279 #ifdef CONFIG_IPW2200_QOS
10280 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10281 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10282 #else
10283 	struct clx2_tx_queue *txq = &priv->txq[0];
10284 #endif				/* CONFIG_IPW2200_QOS */
10285 
10286 	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10287 		return 1;
10288 
10289 	return 0;
10290 }
10291 
10292 #ifdef CONFIG_IPW2200_PROMISCUOUS
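/* Mirror an outgoing frame to the promiscuous (rtap) interface: honour
 * the prom_priv filter flags, optionally keep only the 802.11 header,
 * and prepend a minimal radiotap header carrying just the channel. */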
10293 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10294 				      struct libipw_txb *txb)
10295 {
10296 	struct libipw_rx_stats dummystats;
10297 	struct ieee80211_hdr *hdr;
10298 	u8 n;
10299 	u16 filter = priv->prom_priv->filter;
10300 	int hdr_only = 0;
10301 
10302 	if (filter & IPW_PROM_NO_TX)
10303 		return;
10304 
10305 	memset(&dummystats, 0, sizeof(dummystats));
10306 
10307 	/* Filtering of fragment chains is done against the first fragment */
10308 	hdr = (void *)txb->fragments[0]->data;
10309 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10310 		if (filter & IPW_PROM_NO_MGMT)
10311 			return;
10312 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10313 			hdr_only = 1;
10314 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10315 		if (filter & IPW_PROM_NO_CTL)
10316 			return;
10317 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10318 			hdr_only = 1;
10319 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10320 		if (filter & IPW_PROM_NO_DATA)
10321 			return;
10322 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10323 			hdr_only = 1;
10324 	}
10325 
10326 	for(n=0; n<txb->nr_frags; ++n) {
10327 		struct sk_buff *src = txb->fragments[n];
10328 		struct sk_buff *dst;
10329 		struct ieee80211_radiotap_header *rt_hdr;
10330 		int len;
10331 
10332 		if (hdr_only) {
10333 			hdr = (void *)src->data;
10334 			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10335 		} else
10336 			len = src->len;
10337 
10338 		dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10339 		if (!dst)
10340 			continue;
10341 
10342 		rt_hdr = skb_put(dst, sizeof(*rt_hdr));
10343 
10344 		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10345 		rt_hdr->it_pad = 0;
10346 		rt_hdr->it_present = 0; /* no fields yet; the channel is added below */
10347 		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10348 
10349 		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10350 			ieee80211chan2mhz(priv->channel));
10351 		if (priv->channel > 14) 	/* 802.11a */
10352 			*(__le16*)skb_put(dst, sizeof(u16)) =
10353 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10354 					     IEEE80211_CHAN_5GHZ);
10355 		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10356 			*(__le16*)skb_put(dst, sizeof(u16)) =
10357 				cpu_to_le16(IEEE80211_CHAN_CCK |
10358 					     IEEE80211_CHAN_2GHZ);
10359 		else 		/* 802.11g */
10360 			*(__le16*)skb_put(dst, sizeof(u16)) =
10361 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10362 				 IEEE80211_CHAN_2GHZ);
10363 
10364 		rt_hdr->it_len = cpu_to_le16(dst->len);
10365 
10366 		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10367 
10368 		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10369 			dev_kfree_skb_any(dst);
10370 	}
10371 }
10372 #endif
10373 
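/* libipw hard_start_xmit hook: under the driver spinlock, optionally
 * mirror the frame to the rtap interface, hand it to ipw_tx_skb() and
 * flash the activity LED when it is queued successfully. */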
10374 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10375 					   struct net_device *dev, int pri)
10376 {
10377 	struct ipw_priv *priv = libipw_priv(dev);
10378 	unsigned long flags;
10379 	netdev_tx_t ret;
10380 
10381 	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10382 	spin_lock_irqsave(&priv->lock, flags);
10383 
10384 #ifdef CONFIG_IPW2200_PROMISCUOUS
10385 	if (rtap_iface && netif_running(priv->prom_net_dev))
10386 		ipw_handle_promiscuous_tx(priv, txb);
10387 #endif
10388 
10389 	ret = ipw_tx_skb(priv, txb, pri);
10390 	if (ret == NETDEV_TX_OK)
10391 		__ipw_led_activity_on(priv);
10392 	spin_unlock_irqrestore(&priv->lock, flags);
10393 
10394 	return ret;
10395 }
10396 
10397 static void ipw_net_set_multicast_list(struct net_device *dev)
10398 {
10399 
10400 }
10401 
10402 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10403 {
10404 	struct ipw_priv *priv = libipw_priv(dev);
10405 	struct sockaddr *addr = p;
10406 
10407 	if (!is_valid_ether_addr(addr->sa_data))
10408 		return -EADDRNOTAVAIL;
10409 	mutex_lock(&priv->mutex);
10410 	priv->config |= CFG_CUSTOM_MAC;
10411 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10412 	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10413 	       priv->net_dev->name, priv->mac_addr);
10414 	schedule_work(&priv->adapter_restart);
10415 	mutex_unlock(&priv->mutex);
10416 	return 0;
10417 }
10418 
10419 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10420 				    struct ethtool_drvinfo *info)
10421 {
10422 	struct ipw_priv *p = libipw_priv(dev);
10423 	char vers[64];
10424 	char date[32];
10425 	u32 len;
10426 
10427 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10428 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10429 
10430 	len = sizeof(vers);
10431 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10432 	len = sizeof(date);
10433 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10434 
10435 	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10436 		 vers, date);
10437 	strlcpy(info->bus_info, pci_name(p->pci_dev),
10438 		sizeof(info->bus_info));
10439 }
10440 
10441 static u32 ipw_ethtool_get_link(struct net_device *dev)
10442 {
10443 	struct ipw_priv *priv = libipw_priv(dev);
10444 	return (priv->status & STATUS_ASSOCIATED) != 0;
10445 }
10446 
10447 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10448 {
10449 	return IPW_EEPROM_IMAGE_SIZE;
10450 }
10451 
10452 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10453 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10454 {
10455 	struct ipw_priv *p = libipw_priv(dev);
10456 
10457 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10458 		return -EINVAL;
10459 	mutex_lock(&p->mutex);
10460 	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10461 	mutex_unlock(&p->mutex);
10462 	return 0;
10463 }
10464 
10465 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10466 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10467 {
10468 	struct ipw_priv *p = libipw_priv(dev);
10469 	int i;
10470 
10471 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10472 		return -EINVAL;
10473 	mutex_lock(&p->mutex);
10474 	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10475 	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10476 		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10477 	mutex_unlock(&p->mutex);
10478 	return 0;
10479 }
10480 
10481 static const struct ethtool_ops ipw_ethtool_ops = {
10482 	.get_link = ipw_ethtool_get_link,
10483 	.get_drvinfo = ipw_ethtool_get_drvinfo,
10484 	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10485 	.get_eeprom = ipw_ethtool_get_eeprom,
10486 	.set_eeprom = ipw_ethtool_set_eeprom,
10487 };
10488 
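/* Top-half interrupt handler: read and ack the pending INTA bits,
 * disable further interrupts, cache the status for the tasklet and
 * schedule it.  Returns IRQ_NONE for shared/spurious interrupts or
 * when the hardware has disappeared (INTA reads 0xFFFFFFFF). */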
10489 static irqreturn_t ipw_isr(int irq, void *data)
10490 {
10491 	struct ipw_priv *priv = data;
10492 	u32 inta, inta_mask;
10493 
10494 	if (!priv)
10495 		return IRQ_NONE;
10496 
10497 	spin_lock(&priv->irq_lock);
10498 
10499 	if (!(priv->status & STATUS_INT_ENABLED)) {
10500 		/* IRQ is disabled */
10501 		goto none;
10502 	}
10503 
10504 	inta = ipw_read32(priv, IPW_INTA_RW);
10505 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10506 
10507 	if (inta == 0xFFFFFFFF) {
10508 		/* Hardware disappeared */
10509 		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10510 		goto none;
10511 	}
10512 
10513 	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10514 		/* Shared interrupt */
10515 		goto none;
10516 	}
10517 
10518 	/* tell the device to stop sending interrupts */
10519 	__ipw_disable_interrupts(priv);
10520 
10521 	/* ack current interrupts */
10522 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10523 	ipw_write32(priv, IPW_INTA_RW, inta);
10524 
10525 	/* Cache INTA value for our tasklet */
10526 	priv->isr_inta = inta;
10527 
10528 	tasklet_schedule(&priv->irq_tasklet);
10529 
10530 	spin_unlock(&priv->irq_lock);
10531 
10532 	return IRQ_HANDLED;
10533       none:
10534 	spin_unlock(&priv->irq_lock);
10535 	return IRQ_NONE;
10536 }
10537 
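/* Poll the hardware RF-kill switch: while it is still asserted, check
 * again in two seconds; once it is released (and SW RF-kill is not
 * set), schedule an adapter restart from process context. */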
10538 static void ipw_rf_kill(void *adapter)
10539 {
10540 	struct ipw_priv *priv = adapter;
10541 	unsigned long flags;
10542 
10543 	spin_lock_irqsave(&priv->lock, flags);
10544 
10545 	if (rf_kill_active(priv)) {
10546 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10547 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10548 		goto exit_unlock;
10549 	}
10550 
10551 	/* RF Kill is now disabled, so bring the device back up */
10552 
10553 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10554 		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10555 				  "device\n");
10556 
10557 		/* we cannot do an adapter restart while holding the irq spinlock */
10558 		schedule_work(&priv->adapter_restart);
10559 	} else
10560 		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10561 				  "enabled\n");
10562 
10563       exit_unlock:
10564 	spin_unlock_irqrestore(&priv->lock, flags);
10565 }
10566 
10567 static void ipw_bg_rf_kill(struct work_struct *work)
10568 {
10569 	struct ipw_priv *priv =
10570 		container_of(work, struct ipw_priv, rf_kill.work);
10571 	mutex_lock(&priv->mutex);
10572 	ipw_rf_kill(priv);
10573 	mutex_unlock(&priv->mutex);
10574 }
10575 
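/*
 * Called (via the link_up work item) once an association completes: resets
 * the sequence/fragment tracking and statistics, turns the carrier on,
 * cancels any pending scan work, updates the LEDs and notifies wireless
 * extensions, then optionally re-arms a background scan.
 */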
10576 static void ipw_link_up(struct ipw_priv *priv)
10577 {
10578 	priv->last_seq_num = -1;
10579 	priv->last_frag_num = -1;
10580 	priv->last_packet_time = 0;
10581 
10582 	netif_carrier_on(priv->net_dev);
10583 
10584 	cancel_delayed_work(&priv->request_scan);
10585 	cancel_delayed_work(&priv->request_direct_scan);
10586 	cancel_delayed_work(&priv->request_passive_scan);
10587 	cancel_delayed_work(&priv->scan_event);
10588 	ipw_reset_stats(priv);
10589 	/* Ensure the rate is updated immediately */
10590 	priv->last_rate = ipw_get_current_rate(priv);
10591 	ipw_gather_stats(priv);
10592 	ipw_led_link_up(priv);
10593 	notify_wx_assoc_event(priv);
10594 
10595 	if (priv->config & CFG_BACKGROUND_SCAN)
10596 		schedule_delayed_work(&priv->request_scan, HZ);
10597 }
10598 
10599 static void ipw_bg_link_up(struct work_struct *work)
10600 {
10601 	struct ipw_priv *priv =
10602 		container_of(work, struct ipw_priv, link_up);
10603 	mutex_lock(&priv->mutex);
10604 	ipw_link_up(priv);
10605 	mutex_unlock(&priv->mutex);
10606 }
10607 
10608 static void ipw_link_down(struct ipw_priv *priv)
10609 {
10610 	ipw_led_link_down(priv);
10611 	netif_carrier_off(priv->net_dev);
10612 	notify_wx_assoc_event(priv);
10613 
10614 	/* Cancel any queued work ... */
10615 	cancel_delayed_work(&priv->request_scan);
10616 	cancel_delayed_work(&priv->request_direct_scan);
10617 	cancel_delayed_work(&priv->request_passive_scan);
10618 	cancel_delayed_work(&priv->adhoc_check);
10619 	cancel_delayed_work(&priv->gather_stats);
10620 
10621 	ipw_reset_stats(priv);
10622 
10623 	if (!(priv->status & STATUS_EXIT_PENDING)) {
10624 		/* Queue up another scan... */
10625 		schedule_delayed_work(&priv->request_scan, 0);
10626 	} else
10627 		cancel_delayed_work(&priv->scan_event);
10628 }
10629 
10630 static void ipw_bg_link_down(struct work_struct *work)
10631 {
10632 	struct ipw_priv *priv =
10633 		container_of(work, struct ipw_priv, link_down);
10634 	mutex_lock(&priv->mutex);
10635 	ipw_link_down(priv);
10636 	mutex_unlock(&priv->mutex);
10637 }
10638 
10639 static void ipw_setup_deferred_work(struct ipw_priv *priv)
10640 {
10641 	init_waitqueue_head(&priv->wait_command_queue);
10642 	init_waitqueue_head(&priv->wait_state);
10643 
10644 	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10645 	INIT_WORK(&priv->associate, ipw_bg_associate);
10646 	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10647 	INIT_WORK(&priv->system_config, ipw_system_config);
10648 	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10649 	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10650 	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10651 	INIT_WORK(&priv->up, ipw_bg_up);
10652 	INIT_WORK(&priv->down, ipw_bg_down);
10653 	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10654 	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10655 	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10656 	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10657 	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10658 	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10659 	INIT_WORK(&priv->roam, ipw_bg_roam);
10660 	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10661 	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10662 	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10663 	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10664 	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10665 	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10666 	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10667 
10668 #ifdef CONFIG_IPW2200_QOS
10669 	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10670 #endif				/* CONFIG_IPW2200_QOS */
10671 
10672 	tasklet_setup(&priv->irq_tasklet, ipw_irq_tasklet);
10673 }
10674 
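/*
 * libipw set_security callback.  Copies the supplied WEP keys, active key
 * index, auth mode, privacy/encryption state and security level into
 * priv->ieee->sec, flags STATUS_SECURITY_UPDATED on any change, and loads
 * the keys into the firmware when host encryption is not in use.
 */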
10675 static void shim__set_security(struct net_device *dev,
10676 			       struct libipw_security *sec)
10677 {
10678 	struct ipw_priv *priv = libipw_priv(dev);
10679 	int i;
10680 	for (i = 0; i < 4; i++) {
10681 		if (sec->flags & (1 << i)) {
10682 			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10683 			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10684 			if (sec->key_sizes[i] == 0)
10685 				priv->ieee->sec.flags &= ~(1 << i);
10686 			else {
10687 				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10688 				       sec->key_sizes[i]);
10689 				priv->ieee->sec.flags |= (1 << i);
10690 			}
10691 			priv->status |= STATUS_SECURITY_UPDATED;
10692 		} else if (sec->level != SEC_LEVEL_1)
10693 			priv->ieee->sec.flags &= ~(1 << i);
10694 	}
10695 
10696 	if (sec->flags & SEC_ACTIVE_KEY) {
10697 		priv->ieee->sec.active_key = sec->active_key;
10698 		priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10699 		priv->status |= STATUS_SECURITY_UPDATED;
10700 	} else
10701 		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10702 
10703 	if ((sec->flags & SEC_AUTH_MODE) &&
10704 	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10705 		priv->ieee->sec.auth_mode = sec->auth_mode;
10706 		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10707 		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10708 			priv->capability |= CAP_SHARED_KEY;
10709 		else
10710 			priv->capability &= ~CAP_SHARED_KEY;
10711 		priv->status |= STATUS_SECURITY_UPDATED;
10712 	}
10713 
10714 	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10715 		priv->ieee->sec.flags |= SEC_ENABLED;
10716 		priv->ieee->sec.enabled = sec->enabled;
10717 		priv->status |= STATUS_SECURITY_UPDATED;
10718 		if (sec->enabled)
10719 			priv->capability |= CAP_PRIVACY_ON;
10720 		else
10721 			priv->capability &= ~CAP_PRIVACY_ON;
10722 	}
10723 
10724 	if (sec->flags & SEC_ENCRYPT)
10725 		priv->ieee->sec.encrypt = sec->encrypt;
10726 
10727 	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10728 		priv->ieee->sec.level = sec->level;
10729 		priv->ieee->sec.flags |= SEC_LEVEL;
10730 		priv->status |= STATUS_SECURITY_UPDATED;
10731 	}
10732 
10733 	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10734 		ipw_set_hwcrypto_keys(priv);
10735 
10736 	/* To match the current functionality of ipw2100 (which works well
10737 	 * with various supplicants), we don't force a disassociation if the
10738 	 * privacy capability changes ... */
10739 #if 0
10740 	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10741 	    (((priv->assoc_request.capability &
10742 	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10743 	     (!(priv->assoc_request.capability &
10744 		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10745 		IPW_DEBUG_ASSOC("Disassociating due to capability "
10746 				"change.\n");
10747 		ipw_disassociate(priv);
10748 	}
10749 #endif
10750 }
10751 
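/*
 * Build the rate set advertised to the firmware: OFDM rates only for the
 * 5.2 GHz band, CCK (plus OFDM when enabled) for 2.4 GHz/mixed operation.
 */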
10752 static int init_supported_rates(struct ipw_priv *priv,
10753 				struct ipw_supported_rates *rates)
10754 {
10755 	/* TODO: Mask out rates based on priv->rates_mask */
10756 
10757 	memset(rates, 0, sizeof(*rates));
10758 	/* configure supported rates */
10759 	switch (priv->ieee->freq_band) {
10760 	case LIBIPW_52GHZ_BAND:
10761 		rates->ieee_mode = IPW_A_MODE;
10762 		rates->purpose = IPW_RATE_CAPABILITIES;
10763 		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10764 					LIBIPW_OFDM_DEFAULT_RATES_MASK);
10765 		break;
10766 
10767 	default:		/* Mixed or 2.4 GHz */
10768 		rates->ieee_mode = IPW_G_MODE;
10769 		rates->purpose = IPW_RATE_CAPABILITIES;
10770 		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10771 				       LIBIPW_CCK_DEFAULT_RATES_MASK);
10772 		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10773 			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10774 						LIBIPW_OFDM_DEFAULT_RATES_MASK);
10775 		}
10776 		break;
10777 	}
10778 
10779 	return 0;
10780 }
10781 
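/*
 * Push the run-time configuration to the firmware after a (re)load: TX
 * power, MAC address, system config (including BT coexistence and
 * promiscuous settings), supported rates, RTS threshold, QoS parameters
 * and the random seed, then send HOST_COMPLETE to enter the RUN state.
 */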
10782 static int ipw_config(struct ipw_priv *priv)
10783 {
10784 	/* This is only called from ipw_up, which resets/reloads the firmware,
10785 	   so we don't need to disable the card before configuring it */
10787 	if (ipw_set_tx_power(priv))
10788 		goto error;
10789 
10790 	/* initialize adapter address */
10791 	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10792 		goto error;
10793 
10794 	/* set basic system config settings */
10795 	init_sys_config(&priv->sys_config);
10796 
10797 	/* Support Bluetooth if we have BT h/w on board and the user wants to.
10798 	 * BT priority is not supported yet (don't abort or defer our Tx) */
10799 	if (bt_coexist) {
10800 		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10801 
10802 		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10803 			priv->sys_config.bt_coexistence
10804 			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10805 		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10806 			priv->sys_config.bt_coexistence
10807 			    |= CFG_BT_COEXISTENCE_OOB;
10808 	}
10809 
10810 #ifdef CONFIG_IPW2200_PROMISCUOUS
10811 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10812 		priv->sys_config.accept_all_data_frames = 1;
10813 		priv->sys_config.accept_non_directed_frames = 1;
10814 		priv->sys_config.accept_all_mgmt_bcpr = 1;
10815 		priv->sys_config.accept_all_mgmt_frames = 1;
10816 	}
10817 #endif
10818 
10819 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10820 		priv->sys_config.answer_broadcast_ssid_probe = 1;
10821 	else
10822 		priv->sys_config.answer_broadcast_ssid_probe = 0;
10823 
10824 	if (ipw_send_system_config(priv))
10825 		goto error;
10826 
10827 	init_supported_rates(priv, &priv->rates);
10828 	if (ipw_send_supported_rates(priv, &priv->rates))
10829 		goto error;
10830 
10831 	/* Set request-to-send threshold */
10832 	if (priv->rts_threshold) {
10833 		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10834 			goto error;
10835 	}
10836 #ifdef CONFIG_IPW2200_QOS
10837 	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10838 	ipw_qos_activate(priv, NULL);
10839 #endif				/* CONFIG_IPW2200_QOS */
10840 
10841 	if (ipw_set_random_seed(priv))
10842 		goto error;
10843 
10844 	/* final state transition to the RUN state */
10845 	if (ipw_send_host_complete(priv))
10846 		goto error;
10847 
10848 	priv->status |= STATUS_INIT;
10849 
10850 	ipw_led_init(priv);
10851 	ipw_led_radio_on(priv);
10852 	priv->notif_missed_beacons = 0;
10853 
10854 	/* Set hardware WEP key if it is configured. */
10855 	if ((priv->capability & CAP_PRIVACY_ON) &&
10856 	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10857 	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10858 		ipw_set_hwcrypto_keys(priv);
10859 
10860 	return 0;
10861 
10862       error:
10863 	return -EIO;
10864 }
10865 
10866 /*
10867  * NOTE:
10868  *
10869  * These tables have been tested in conjunction with the
10870  * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10871  *
10872  * Altering these values, using them on other hardware, or using them in
10873  * geographies not intended for resale of the above-mentioned Intel
10874  * adapters has not been tested.
10875  *
10876  * Remember to update the table in README.ipw2200 when changing this
10877  * table.
10878  *
10879  */
10880 static const struct libipw_geo ipw_geos[] = {
10881 	{			/* Restricted */
10882 	 "---",
10883 	 .bg_channels = 11,
10884 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10885 		{2427, 4}, {2432, 5}, {2437, 6},
10886 		{2442, 7}, {2447, 8}, {2452, 9},
10887 		{2457, 10}, {2462, 11}},
10888 	 },
10889 
10890 	{			/* Custom US/Canada */
10891 	 "ZZF",
10892 	 .bg_channels = 11,
10893 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10894 		{2427, 4}, {2432, 5}, {2437, 6},
10895 		{2442, 7}, {2447, 8}, {2452, 9},
10896 		{2457, 10}, {2462, 11}},
10897 	 .a_channels = 8,
10898 	 .a = {{5180, 36},
10899 	       {5200, 40},
10900 	       {5220, 44},
10901 	       {5240, 48},
10902 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10903 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10904 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10905 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
10906 	 },
10907 
10908 	{			/* Rest of World */
10909 	 "ZZD",
10910 	 .bg_channels = 13,
10911 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10912 		{2427, 4}, {2432, 5}, {2437, 6},
10913 		{2442, 7}, {2447, 8}, {2452, 9},
10914 		{2457, 10}, {2462, 11}, {2467, 12},
10915 		{2472, 13}},
10916 	 },
10917 
10918 	{			/* Custom USA & Europe & High */
10919 	 "ZZA",
10920 	 .bg_channels = 11,
10921 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10922 		{2427, 4}, {2432, 5}, {2437, 6},
10923 		{2442, 7}, {2447, 8}, {2452, 9},
10924 		{2457, 10}, {2462, 11}},
10925 	 .a_channels = 13,
10926 	 .a = {{5180, 36},
10927 	       {5200, 40},
10928 	       {5220, 44},
10929 	       {5240, 48},
10930 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10931 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10932 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10933 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10934 	       {5745, 149},
10935 	       {5765, 153},
10936 	       {5785, 157},
10937 	       {5805, 161},
10938 	       {5825, 165}},
10939 	 },
10940 
10941 	{			/* Custom NA & Europe */
10942 	 "ZZB",
10943 	 .bg_channels = 11,
10944 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10945 		{2427, 4}, {2432, 5}, {2437, 6},
10946 		{2442, 7}, {2447, 8}, {2452, 9},
10947 		{2457, 10}, {2462, 11}},
10948 	 .a_channels = 13,
10949 	 .a = {{5180, 36},
10950 	       {5200, 40},
10951 	       {5220, 44},
10952 	       {5240, 48},
10953 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10954 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10955 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10956 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10957 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
10958 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
10959 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
10960 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
10961 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
10962 	 },
10963 
10964 	{			/* Custom Japan */
10965 	 "ZZC",
10966 	 .bg_channels = 11,
10967 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10968 		{2427, 4}, {2432, 5}, {2437, 6},
10969 		{2442, 7}, {2447, 8}, {2452, 9},
10970 		{2457, 10}, {2462, 11}},
10971 	 .a_channels = 4,
10972 	 .a = {{5170, 34}, {5190, 38},
10973 	       {5210, 42}, {5230, 46}},
10974 	 },
10975 
10976 	{			/* Custom */
10977 	 "ZZM",
10978 	 .bg_channels = 11,
10979 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10980 		{2427, 4}, {2432, 5}, {2437, 6},
10981 		{2442, 7}, {2447, 8}, {2452, 9},
10982 		{2457, 10}, {2462, 11}},
10983 	 },
10984 
10985 	{			/* Europe */
10986 	 "ZZE",
10987 	 .bg_channels = 13,
10988 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10989 		{2427, 4}, {2432, 5}, {2437, 6},
10990 		{2442, 7}, {2447, 8}, {2452, 9},
10991 		{2457, 10}, {2462, 11}, {2467, 12},
10992 		{2472, 13}},
10993 	 .a_channels = 19,
10994 	 .a = {{5180, 36},
10995 	       {5200, 40},
10996 	       {5220, 44},
10997 	       {5240, 48},
10998 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10999 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11000 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11001 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11002 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11003 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11004 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11005 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11006 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11007 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11008 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11009 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11010 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11011 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11012 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11013 	 },
11014 
11015 	{			/* Custom Japan */
11016 	 "ZZJ",
11017 	 .bg_channels = 14,
11018 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11019 		{2427, 4}, {2432, 5}, {2437, 6},
11020 		{2442, 7}, {2447, 8}, {2452, 9},
11021 		{2457, 10}, {2462, 11}, {2467, 12},
11022 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11023 	 .a_channels = 4,
11024 	 .a = {{5170, 34}, {5190, 38},
11025 	       {5210, 42}, {5230, 46}},
11026 	 },
11027 
11028 	{			/* Rest of World */
11029 	 "ZZR",
11030 	 .bg_channels = 14,
11031 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11032 		{2427, 4}, {2432, 5}, {2437, 6},
11033 		{2442, 7}, {2447, 8}, {2452, 9},
11034 		{2457, 10}, {2462, 11}, {2467, 12},
11035 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11036 			     LIBIPW_CH_PASSIVE_ONLY}},
11037 	 },
11038 
11039 	{			/* High Band */
11040 	 "ZZH",
11041 	 .bg_channels = 13,
11042 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11043 		{2427, 4}, {2432, 5}, {2437, 6},
11044 		{2442, 7}, {2447, 8}, {2452, 9},
11045 		{2457, 10}, {2462, 11},
11046 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11047 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11048 	 .a_channels = 4,
11049 	 .a = {{5745, 149}, {5765, 153},
11050 	       {5785, 157}, {5805, 161}},
11051 	 },
11052 
11053 	{			/* Custom Europe */
11054 	 "ZZG",
11055 	 .bg_channels = 13,
11056 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11057 		{2427, 4}, {2432, 5}, {2437, 6},
11058 		{2442, 7}, {2447, 8}, {2452, 9},
11059 		{2457, 10}, {2462, 11},
11060 		{2467, 12}, {2472, 13}},
11061 	 .a_channels = 4,
11062 	 .a = {{5180, 36}, {5200, 40},
11063 	       {5220, 44}, {5240, 48}},
11064 	 },
11065 
11066 	{			/* Europe */
11067 	 "ZZK",
11068 	 .bg_channels = 13,
11069 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11070 		{2427, 4}, {2432, 5}, {2437, 6},
11071 		{2442, 7}, {2447, 8}, {2452, 9},
11072 		{2457, 10}, {2462, 11},
11073 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11074 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11075 	 .a_channels = 24,
11076 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11077 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11078 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11079 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11080 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11081 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11082 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11083 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11084 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11085 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11086 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11087 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11088 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11089 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11090 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11091 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11092 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11093 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11094 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11095 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11096 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11097 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11098 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11099 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11100 	 },
11101 
11102 	{			/* Europe */
11103 	 "ZZL",
11104 	 .bg_channels = 11,
11105 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11106 		{2427, 4}, {2432, 5}, {2437, 6},
11107 		{2442, 7}, {2447, 8}, {2452, 9},
11108 		{2457, 10}, {2462, 11}},
11109 	 .a_channels = 13,
11110 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11111 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11112 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11113 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11114 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11115 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11116 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11117 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11118 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11119 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11120 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11121 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11122 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11123 	 }
11124 };
11125 
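/*
 * Match the three-character country/SKU code from the EEPROM against the
 * ipw_geos[] table; unknown codes fall back to the restricted "---" entry.
 */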
11126 static void ipw_set_geo(struct ipw_priv *priv)
11127 {
11128 	int j;
11129 
11130 	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11131 		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11132 			    ipw_geos[j].name, 3))
11133 			break;
11134 	}
11135 
11136 	if (j == ARRAY_SIZE(ipw_geos)) {
11137 		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11138 			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11139 			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11140 			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11141 		j = 0;
11142 	}
11143 
11144 	libipw_set_geo(priv->ieee, &ipw_geos[j]);
11145 }
11146 
11147 #define MAX_HW_RESTARTS 5
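/*
 * Bring the device up: load microcode/firmware/EEPROM (retrying up to
 * MAX_HW_RESTARTS times), program the MAC address and geography, honour
 * any RF-kill state, then configure the device and kick off an initial
 * scan.
 */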
11148 static int ipw_up(struct ipw_priv *priv)
11149 {
11150 	int rc, i;
11151 
11152 	/* Age scan list entries found before suspend */
11153 	if (priv->suspend_time) {
11154 		libipw_networks_age(priv->ieee, priv->suspend_time);
11155 		priv->suspend_time = 0;
11156 	}
11157 
11158 	if (priv->status & STATUS_EXIT_PENDING)
11159 		return -EIO;
11160 
11161 	if (cmdlog && !priv->cmdlog) {
11162 		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11163 				       GFP_KERNEL);
11164 		if (priv->cmdlog == NULL) {
11165 			IPW_ERROR("Error allocating %d command log entries.\n",
11166 				  cmdlog);
11167 			return -ENOMEM;
11168 		} else {
11169 			priv->cmdlog_len = cmdlog;
11170 		}
11171 	}
11172 
11173 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11174 		/* Load the microcode, firmware, and eeprom.
11175 		 * Also start the clocks. */
11176 		rc = ipw_load(priv);
11177 		if (rc) {
11178 			IPW_ERROR("Unable to load firmware: %d\n", rc);
11179 			return rc;
11180 		}
11181 
11182 		ipw_init_ordinals(priv);
11183 		if (!(priv->config & CFG_CUSTOM_MAC))
11184 			eeprom_parse_mac(priv, priv->mac_addr);
11185 		eth_hw_addr_set(priv->net_dev, priv->mac_addr);
11186 
11187 		ipw_set_geo(priv);
11188 
11189 		if (priv->status & STATUS_RF_KILL_SW) {
11190 			IPW_WARNING("Radio disabled by module parameter.\n");
11191 			return 0;
11192 		} else if (rf_kill_active(priv)) {
11193 			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11194 				    "Kill switch must be turned off for "
11195 				    "wireless networking to work.\n");
11196 			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11197 			return 0;
11198 		}
11199 
11200 		rc = ipw_config(priv);
11201 		if (!rc) {
11202 			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11203 
11204 			/* If configured to try to auto-associate, kick
11205 			 * off a scan. */
11206 			schedule_delayed_work(&priv->request_scan, 0);
11207 
11208 			return 0;
11209 		}
11210 
11211 		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11212 		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11213 			       i, MAX_HW_RESTARTS);
11214 
11215 		/* We had an error bringing up the hardware, so take it
11216 		 * all the way back down so we can try again */
11217 		ipw_down(priv);
11218 	}
11219 
11220 	/* We tried to restart and configure the device for as long as our
11221 	 * patience could withstand */
11222 	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11223 
11224 	return -EIO;
11225 }
11226 
11227 static void ipw_bg_up(struct work_struct *work)
11228 {
11229 	struct ipw_priv *priv =
11230 		container_of(work, struct ipw_priv, up);
11231 	mutex_lock(&priv->mutex);
11232 	ipw_up(priv);
11233 	mutex_unlock(&priv->mutex);
11234 }
11235 
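/*
 * Orderly shutdown of an initialized device: abort any scan, disassociate,
 * wait for the scanning/association status bits to clear, then ask the
 * firmware to disable the card.
 */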
11236 static void ipw_deinit(struct ipw_priv *priv)
11237 {
11238 	int i;
11239 
11240 	if (priv->status & STATUS_SCANNING) {
11241 		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11242 		ipw_abort_scan(priv);
11243 	}
11244 
11245 	if (priv->status & STATUS_ASSOCIATED) {
11246 		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11247 		ipw_disassociate(priv);
11248 	}
11249 
11250 	ipw_led_shutdown(priv);
11251 
11252 	/* Wait up to 1s for the status to change to not scanning and not
11253 	 * associated (disassociation can take a while for a full 802.11
11254 	 * exchange) */
11255 	for (i = 1000; i && (priv->status &
11256 			     (STATUS_DISASSOCIATING |
11257 			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11258 		udelay(10);
11259 
11260 	if (priv->status & (STATUS_DISASSOCIATING |
11261 			    STATUS_ASSOCIATED | STATUS_SCANNING))
11262 		IPW_DEBUG_INFO("Still associated or scanning...\n");
11263 	else
11264 		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11265 
11266 	/* Attempt to disable the card */
11267 	ipw_send_card_disable(priv, 0);
11268 
11269 	priv->status &= ~STATUS_INIT;
11270 }
11271 
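/*
 * Take the device all the way down: de-init if needed, disable interrupts,
 * drop the carrier, stop the NIC and turn the radio LED off.  The RF-kill
 * and EXIT_PENDING status bits are preserved across the shutdown.
 */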
11272 static void ipw_down(struct ipw_priv *priv)
11273 {
11274 	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11275 
11276 	priv->status |= STATUS_EXIT_PENDING;
11277 
11278 	if (ipw_is_init(priv))
11279 		ipw_deinit(priv);
11280 
11281 	/* Wipe out the EXIT_PENDING status bit if we are not actually
11282 	 * exiting the module */
11283 	if (!exit_pending)
11284 		priv->status &= ~STATUS_EXIT_PENDING;
11285 
11286 	/* tell the device to stop sending interrupts */
11287 	ipw_disable_interrupts(priv);
11288 
11289 	/* Clear all bits but the RF Kill and EXIT_PENDING bits */
11290 	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11291 	netif_carrier_off(priv->net_dev);
11292 
11293 	ipw_stop_nic(priv);
11294 
11295 	ipw_led_radio_off(priv);
11296 }
11297 
11298 static void ipw_bg_down(struct work_struct *work)
11299 {
11300 	struct ipw_priv *priv =
11301 		container_of(work, struct ipw_priv, down);
11302 	mutex_lock(&priv->mutex);
11303 	ipw_down(priv);
11304 	mutex_unlock(&priv->mutex);
11305 }
11306 
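/*
 * Translate the selected libipw geography into cfg80211 band/channel
 * tables, attach the bitrate arrays and cipher suites, and register the
 * wiphy with the PCI device as its parent.
 */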
11307 static int ipw_wdev_init(struct net_device *dev)
11308 {
11309 	int i, rc = 0;
11310 	struct ipw_priv *priv = libipw_priv(dev);
11311 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11312 	struct wireless_dev *wdev = &priv->ieee->wdev;
11313 
11314 	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11315 
11316 	/* fill-out priv->ieee->bg_band */
11317 	if (geo->bg_channels) {
11318 		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11319 
11320 		bg_band->band = NL80211_BAND_2GHZ;
11321 		bg_band->n_channels = geo->bg_channels;
11322 		bg_band->channels = kcalloc(geo->bg_channels,
11323 					    sizeof(struct ieee80211_channel),
11324 					    GFP_KERNEL);
11325 		if (!bg_band->channels) {
11326 			rc = -ENOMEM;
11327 			goto out;
11328 		}
11329 		/* translate geo->bg to bg_band.channels */
11330 		for (i = 0; i < geo->bg_channels; i++) {
11331 			bg_band->channels[i].band = NL80211_BAND_2GHZ;
11332 			bg_band->channels[i].center_freq = geo->bg[i].freq;
11333 			bg_band->channels[i].hw_value = geo->bg[i].channel;
11334 			bg_band->channels[i].max_power = geo->bg[i].max_power;
11335 			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11336 				bg_band->channels[i].flags |=
11337 					IEEE80211_CHAN_NO_IR;
11338 			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11339 				bg_band->channels[i].flags |=
11340 					IEEE80211_CHAN_NO_IR;
11341 			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11342 				bg_band->channels[i].flags |=
11343 					IEEE80211_CHAN_RADAR;
11344 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11345 			   LIBIPW_CH_UNIFORM_SPREADING, or
11346 			   LIBIPW_CH_B_ONLY... */
11347 		}
11348 		/* point at bitrate info */
11349 		bg_band->bitrates = ipw2200_bg_rates;
11350 		bg_band->n_bitrates = ipw2200_num_bg_rates;
11351 
11352 		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
11353 	}
11354 
11355 	/* fill-out priv->ieee->a_band */
11356 	if (geo->a_channels) {
11357 		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11358 
11359 		a_band->band = NL80211_BAND_5GHZ;
11360 		a_band->n_channels = geo->a_channels;
11361 		a_band->channels = kcalloc(geo->a_channels,
11362 					   sizeof(struct ieee80211_channel),
11363 					   GFP_KERNEL);
11364 		if (!a_band->channels) {
11365 			rc = -ENOMEM;
11366 			goto out;
11367 		}
11368 		/* translate geo->a to a_band.channels */
11369 		for (i = 0; i < geo->a_channels; i++) {
11370 			a_band->channels[i].band = NL80211_BAND_5GHZ;
11371 			a_band->channels[i].center_freq = geo->a[i].freq;
11372 			a_band->channels[i].hw_value = geo->a[i].channel;
11373 			a_band->channels[i].max_power = geo->a[i].max_power;
11374 			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11375 				a_band->channels[i].flags |=
11376 					IEEE80211_CHAN_NO_IR;
11377 			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11378 				a_band->channels[i].flags |=
11379 					IEEE80211_CHAN_NO_IR;
11380 			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11381 				a_band->channels[i].flags |=
11382 					IEEE80211_CHAN_RADAR;
11383 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11384 			   LIBIPW_CH_UNIFORM_SPREADING, or
11385 			   LIBIPW_CH_B_ONLY... */
11386 		}
11387 		/* point at bitrate info */
11388 		a_band->bitrates = ipw2200_a_rates;
11389 		a_band->n_bitrates = ipw2200_num_a_rates;
11390 
11391 		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
11392 	}
11393 
11394 	wdev->wiphy->cipher_suites = ipw_cipher_suites;
11395 	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11396 
11397 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11398 
11399 	/* With that information in place, we can now register the wiphy... */
11400 	if (wiphy_register(wdev->wiphy))
11401 		rc = -EIO;
11402 out:
11403 	return rc;
11404 }
11405 
11406 /* PCI driver stuff */
11407 static const struct pci_device_id card_ids[] = {
11408 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11409 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11410 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11411 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11412 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11413 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11414 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11415 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11416 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11417 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11418 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11419 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11420 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11421 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11422 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11423 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11424 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11425 	{PCI_VDEVICE(INTEL, 0x104f), 0},
11426 	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
11427 	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
11428 	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
11429 	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
11430 
11431 	/* required last entry */
11432 	{0,}
11433 };
11434 
11435 MODULE_DEVICE_TABLE(pci, card_ids);
11436 
11437 static struct attribute *ipw_sysfs_entries[] = {
11438 	&dev_attr_rf_kill.attr,
11439 	&dev_attr_direct_dword.attr,
11440 	&dev_attr_indirect_byte.attr,
11441 	&dev_attr_indirect_dword.attr,
11442 	&dev_attr_mem_gpio_reg.attr,
11443 	&dev_attr_command_event_reg.attr,
11444 	&dev_attr_nic_type.attr,
11445 	&dev_attr_status.attr,
11446 	&dev_attr_cfg.attr,
11447 	&dev_attr_error.attr,
11448 	&dev_attr_event_log.attr,
11449 	&dev_attr_cmd_log.attr,
11450 	&dev_attr_eeprom_delay.attr,
11451 	&dev_attr_ucode_version.attr,
11452 	&dev_attr_rtc.attr,
11453 	&dev_attr_scan_age.attr,
11454 	&dev_attr_led.attr,
11455 	&dev_attr_speed_scan.attr,
11456 	&dev_attr_net_stats.attr,
11457 	&dev_attr_channels.attr,
11458 #ifdef CONFIG_IPW2200_PROMISCUOUS
11459 	&dev_attr_rtap_iface.attr,
11460 	&dev_attr_rtap_filter.attr,
11461 #endif
11462 	NULL
11463 };
11464 
11465 static const struct attribute_group ipw_attribute_group = {
11466 	.name = NULL,		/* put in device directory */
11467 	.attrs = ipw_sysfs_entries,
11468 };
11469 
11470 #ifdef CONFIG_IPW2200_PROMISCUOUS
11471 static int ipw_prom_open(struct net_device *dev)
11472 {
11473 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11474 	struct ipw_priv *priv = prom_priv->priv;
11475 
11476 	IPW_DEBUG_INFO("prom dev->open\n");
11477 	netif_carrier_off(dev);
11478 
11479 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11480 		priv->sys_config.accept_all_data_frames = 1;
11481 		priv->sys_config.accept_non_directed_frames = 1;
11482 		priv->sys_config.accept_all_mgmt_bcpr = 1;
11483 		priv->sys_config.accept_all_mgmt_frames = 1;
11484 
11485 		ipw_send_system_config(priv);
11486 	}
11487 
11488 	return 0;
11489 }
11490 
11491 static int ipw_prom_stop(struct net_device *dev)
11492 {
11493 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11494 	struct ipw_priv *priv = prom_priv->priv;
11495 
11496 	IPW_DEBUG_INFO("prom dev->stop\n");
11497 
11498 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11499 		priv->sys_config.accept_all_data_frames = 0;
11500 		priv->sys_config.accept_non_directed_frames = 0;
11501 		priv->sys_config.accept_all_mgmt_bcpr = 0;
11502 		priv->sys_config.accept_all_mgmt_frames = 0;
11503 
11504 		ipw_send_system_config(priv);
11505 	}
11506 
11507 	return 0;
11508 }
11509 
11510 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11511 					    struct net_device *dev)
11512 {
11513 	IPW_DEBUG_INFO("prom dev->xmit\n");
11514 	dev_kfree_skb(skb);
11515 	return NETDEV_TX_OK;
11516 }
11517 
11518 static const struct net_device_ops ipw_prom_netdev_ops = {
11519 	.ndo_open 		= ipw_prom_open,
11520 	.ndo_stop		= ipw_prom_stop,
11521 	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
11522 	.ndo_set_mac_address 	= eth_mac_addr,
11523 	.ndo_validate_addr	= eth_validate_addr,
11524 };
11525 
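/*
 * Allocate and register the optional "rtap%d" radiotap monitor netdev
 * used by the promiscuous-mode (rtap_iface) support.
 */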
11526 static int ipw_prom_alloc(struct ipw_priv *priv)
11527 {
11528 	int rc = 0;
11529 
11530 	if (priv->prom_net_dev)
11531 		return -EPERM;
11532 
11533 	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11534 	if (priv->prom_net_dev == NULL)
11535 		return -ENOMEM;
11536 
11537 	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11538 	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11539 	priv->prom_priv->priv = priv;
11540 
11541 	strcpy(priv->prom_net_dev->name, "rtap%d");
11542 	eth_hw_addr_set(priv->prom_net_dev, priv->mac_addr);
11543 
11544 	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11545 	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11546 
11547 	priv->prom_net_dev->min_mtu = 68;
11548 	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
11549 
11550 	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11551 	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11552 
11553 	rc = register_netdev(priv->prom_net_dev);
11554 	if (rc) {
11555 		free_libipw(priv->prom_net_dev, 1);
11556 		priv->prom_net_dev = NULL;
11557 		return rc;
11558 	}
11559 
11560 	return 0;
11561 }
11562 
11563 static void ipw_prom_free(struct ipw_priv *priv)
11564 {
11565 	if (!priv->prom_net_dev)
11566 		return;
11567 
11568 	unregister_netdev(priv->prom_net_dev);
11569 	free_libipw(priv->prom_net_dev, 1);
11570 
11571 	priv->prom_net_dev = NULL;
11572 }
11573 
11574 #endif
11575 
11576 static const struct net_device_ops ipw_netdev_ops = {
11577 	.ndo_open		= ipw_net_open,
11578 	.ndo_stop		= ipw_net_stop,
11579 	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
11580 	.ndo_set_mac_address	= ipw_net_set_mac_address,
11581 	.ndo_start_xmit		= libipw_xmit,
11582 	.ndo_validate_addr	= eth_validate_addr,
11583 };
11584 
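/*
 * PCI probe: allocate the libipw/net device, enable and map the PCI
 * resources, set up deferred work and the interrupt handler, create the
 * sysfs attributes, bring the hardware up, then register the wiphy, the
 * network device and (optionally) the promiscuous rtap interface.
 */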
11585 static int ipw_pci_probe(struct pci_dev *pdev,
11586 				   const struct pci_device_id *ent)
11587 {
11588 	int err = 0;
11589 	struct net_device *net_dev;
11590 	void __iomem *base;
11591 	u32 length, val;
11592 	struct ipw_priv *priv;
11593 	int i;
11594 
11595 	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11596 	if (net_dev == NULL) {
11597 		err = -ENOMEM;
11598 		goto out;
11599 	}
11600 
11601 	priv = libipw_priv(net_dev);
11602 	priv->ieee = netdev_priv(net_dev);
11603 
11604 	priv->net_dev = net_dev;
11605 	priv->pci_dev = pdev;
11606 	ipw_debug_level = debug;
11607 	spin_lock_init(&priv->irq_lock);
11608 	spin_lock_init(&priv->lock);
11609 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11610 		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11611 
11612 	mutex_init(&priv->mutex);
11613 	if (pci_enable_device(pdev)) {
11614 		err = -ENODEV;
11615 		goto out_free_libipw;
11616 	}
11617 
11618 	pci_set_master(pdev);
11619 
11620 	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
11621 	if (!err)
11622 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
11623 	if (err) {
11624 		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11625 		goto out_pci_disable_device;
11626 	}
11627 
11628 	pci_set_drvdata(pdev, priv);
11629 
11630 	err = pci_request_regions(pdev, DRV_NAME);
11631 	if (err)
11632 		goto out_pci_disable_device;
11633 
11634 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11635 	 * PCI Tx retries from interfering with C3 CPU state */
11636 	pci_read_config_dword(pdev, 0x40, &val);
11637 	if ((val & 0x0000ff00) != 0)
11638 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11639 
11640 	length = pci_resource_len(pdev, 0);
11641 	priv->hw_len = length;
11642 
11643 	base = pci_ioremap_bar(pdev, 0);
11644 	if (!base) {
11645 		err = -ENODEV;
11646 		goto out_pci_release_regions;
11647 	}
11648 
11649 	priv->hw_base = base;
11650 	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11651 	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11652 
11653 	ipw_setup_deferred_work(priv);
11654 
11655 	ipw_sw_reset(priv, 1);
11656 
11657 	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11658 	if (err) {
11659 		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11660 		goto out_iounmap;
11661 	}
11662 
11663 	SET_NETDEV_DEV(net_dev, &pdev->dev);
11664 
11665 	mutex_lock(&priv->mutex);
11666 
11667 	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11668 	priv->ieee->set_security = shim__set_security;
11669 	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11670 
11671 #ifdef CONFIG_IPW2200_QOS
11672 	priv->ieee->is_qos_active = ipw_is_qos_active;
11673 	priv->ieee->handle_probe_response = ipw_handle_beacon;
11674 	priv->ieee->handle_beacon = ipw_handle_probe_response;
11675 	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11676 #endif				/* CONFIG_IPW2200_QOS */
11677 
11678 	priv->ieee->perfect_rssi = -20;
11679 	priv->ieee->worst_rssi = -85;
11680 
11681 	net_dev->netdev_ops = &ipw_netdev_ops;
11682 	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11683 	net_dev->wireless_data = &priv->wireless_data;
11684 	net_dev->wireless_handlers = &ipw_wx_handler_def;
11685 	net_dev->ethtool_ops = &ipw_ethtool_ops;
11686 
11687 	net_dev->min_mtu = 68;
11688 	net_dev->max_mtu = LIBIPW_DATA_LEN;
11689 
11690 	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11691 	if (err) {
11692 		IPW_ERROR("failed to create sysfs device attributes\n");
11693 		mutex_unlock(&priv->mutex);
11694 		goto out_release_irq;
11695 	}
11696 
11697 	if (ipw_up(priv)) {
11698 		mutex_unlock(&priv->mutex);
11699 		err = -EIO;
11700 		goto out_remove_sysfs;
11701 	}
11702 
11703 	mutex_unlock(&priv->mutex);
11704 
11705 	err = ipw_wdev_init(net_dev);
11706 	if (err) {
11707 		IPW_ERROR("failed to register wireless device\n");
11708 		goto out_remove_sysfs;
11709 	}
11710 
11711 	err = register_netdev(net_dev);
11712 	if (err) {
11713 		IPW_ERROR("failed to register network device\n");
11714 		goto out_unregister_wiphy;
11715 	}
11716 
11717 #ifdef CONFIG_IPW2200_PROMISCUOUS
11718 	if (rtap_iface) {
11719 	        err = ipw_prom_alloc(priv);
11720 		if (err) {
11721 			IPW_ERROR("Failed to register promiscuous network "
11722 				  "device (error %d).\n", err);
11723 			unregister_netdev(priv->net_dev);
11724 			goto out_unregister_wiphy;
11725 		}
11726 	}
11727 #endif
11728 
11729 	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11730 	       "channels, %d 802.11a channels)\n",
11731 	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11732 	       priv->ieee->geo.a_channels);
11733 
11734 	return 0;
11735 
11736       out_unregister_wiphy:
11737 	wiphy_unregister(priv->ieee->wdev.wiphy);
11738 	kfree(priv->ieee->a_band.channels);
11739 	kfree(priv->ieee->bg_band.channels);
11740       out_remove_sysfs:
11741 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11742       out_release_irq:
11743 	free_irq(pdev->irq, priv);
11744       out_iounmap:
11745 	iounmap(priv->hw_base);
11746       out_pci_release_regions:
11747 	pci_release_regions(pdev);
11748       out_pci_disable_device:
11749 	pci_disable_device(pdev);
11750       out_free_libipw:
11751 	free_libipw(priv->net_dev, 0);
11752       out:
11753 	return err;
11754 }
11755 
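/*
 * PCI remove: mirror of the probe path.  Take the device down, remove the
 * sysfs group, unregister the netdev, free the RX/TX queues and pending
 * work items, release IRQ/MMIO/PCI resources, and finally unregister the
 * wiphy before freeing the libipw device.
 */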
11756 static void ipw_pci_remove(struct pci_dev *pdev)
11757 {
11758 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11759 	struct list_head *p, *q;
11760 	int i;
11761 
11762 	if (!priv)
11763 		return;
11764 
11765 	mutex_lock(&priv->mutex);
11766 
11767 	priv->status |= STATUS_EXIT_PENDING;
11768 	ipw_down(priv);
11769 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11770 
11771 	mutex_unlock(&priv->mutex);
11772 
11773 	unregister_netdev(priv->net_dev);
11774 
11775 	if (priv->rxq) {
11776 		ipw_rx_queue_free(priv, priv->rxq);
11777 		priv->rxq = NULL;
11778 	}
11779 	ipw_tx_queue_free(priv);
11780 
11781 	if (priv->cmdlog) {
11782 		kfree(priv->cmdlog);
11783 		priv->cmdlog = NULL;
11784 	}
11785 
11786 	/* make sure all works are inactive */
11787 	cancel_delayed_work_sync(&priv->adhoc_check);
11788 	cancel_work_sync(&priv->associate);
11789 	cancel_work_sync(&priv->disassociate);
11790 	cancel_work_sync(&priv->system_config);
11791 	cancel_work_sync(&priv->rx_replenish);
11792 	cancel_work_sync(&priv->adapter_restart);
11793 	cancel_delayed_work_sync(&priv->rf_kill);
11794 	cancel_work_sync(&priv->up);
11795 	cancel_work_sync(&priv->down);
11796 	cancel_delayed_work_sync(&priv->request_scan);
11797 	cancel_delayed_work_sync(&priv->request_direct_scan);
11798 	cancel_delayed_work_sync(&priv->request_passive_scan);
11799 	cancel_delayed_work_sync(&priv->scan_event);
11800 	cancel_delayed_work_sync(&priv->gather_stats);
11801 	cancel_work_sync(&priv->abort_scan);
11802 	cancel_work_sync(&priv->roam);
11803 	cancel_delayed_work_sync(&priv->scan_check);
11804 	cancel_work_sync(&priv->link_up);
11805 	cancel_work_sync(&priv->link_down);
11806 	cancel_delayed_work_sync(&priv->led_link_on);
11807 	cancel_delayed_work_sync(&priv->led_link_off);
11808 	cancel_delayed_work_sync(&priv->led_act_off);
11809 	cancel_work_sync(&priv->merge_networks);
11810 
11811 	/* Free MAC hash list for ADHOC */
11812 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11813 		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11814 			list_del(p);
11815 			kfree(list_entry(p, struct ipw_ibss_seq, list));
11816 		}
11817 	}
11818 
11819 	kfree(priv->error);
11820 	priv->error = NULL;
11821 
11822 #ifdef CONFIG_IPW2200_PROMISCUOUS
11823 	ipw_prom_free(priv);
11824 #endif
11825 
11826 	free_irq(pdev->irq, priv);
11827 	iounmap(priv->hw_base);
11828 	pci_release_regions(pdev);
11829 	pci_disable_device(pdev);
11830 	/* wiphy_unregister needs to be here, before free_libipw */
11831 	wiphy_unregister(priv->ieee->wdev.wiphy);
11832 	kfree(priv->ieee->a_band.channels);
11833 	kfree(priv->ieee->bg_band.channels);
11834 	free_libipw(priv->net_dev, 0);
11835 	free_firmware();
11836 }
11837 
11838 static int __maybe_unused ipw_pci_suspend(struct device *dev_d)
11839 {
11840 	struct ipw_priv *priv = dev_get_drvdata(dev_d);
11841 	struct net_device *dev = priv->net_dev;
11842 
11843 	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11844 
11845 	/* Take down the device; powers it off, etc. */
11846 	ipw_down(priv);
11847 
11848 	/* Remove the PRESENT state of the device */
11849 	netif_device_detach(dev);
11850 
11851 	priv->suspend_at = ktime_get_boottime_seconds();
11852 
11853 	return 0;
11854 }
11855 
11856 static int __maybe_unused ipw_pci_resume(struct device *dev_d)
11857 {
11858 	struct pci_dev *pdev = to_pci_dev(dev_d);
11859 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11860 	struct net_device *dev = priv->net_dev;
11861 	u32 val;
11862 
11863 	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11864 
11865 	/*
11866 	 * Suspend/Resume resets the PCI configuration space, so we have to
11867 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11868 	 * from interfering with C3 CPU state. pci_restore_state won't help
11869 	 * here since it only restores the first 64 bytes of the PCI config header.
11870 	 */
11871 	pci_read_config_dword(pdev, 0x40, &val);
11872 	if ((val & 0x0000ff00) != 0)
11873 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11874 
11875 	/* Set the device back into the PRESENT state; this will also wake
11876 	 * the queue if needed */
11877 	netif_device_attach(dev);
11878 
11879 	priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
11880 
11881 	/* Bring the device back up */
11882 	schedule_work(&priv->up);
11883 
11884 	return 0;
11885 }
11886 
11887 static void ipw_pci_shutdown(struct pci_dev *pdev)
11888 {
11889 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11890 
11891 	/* Take down the device; powers it off, etc. */
11892 	ipw_down(priv);
11893 
11894 	pci_disable_device(pdev);
11895 }
11896 
11897 static SIMPLE_DEV_PM_OPS(ipw_pci_pm_ops, ipw_pci_suspend, ipw_pci_resume);
11898 
11899 /* driver initialization stuff */
11900 static struct pci_driver ipw_driver = {
11901 	.name = DRV_NAME,
11902 	.id_table = card_ids,
11903 	.probe = ipw_pci_probe,
11904 	.remove = ipw_pci_remove,
11905 	.driver.pm = &ipw_pci_pm_ops,
11906 	.shutdown = ipw_pci_shutdown,
11907 };
11908 
11909 static int __init ipw_init(void)
11910 {
11911 	int ret;
11912 
11913 	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11914 	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11915 
11916 	ret = pci_register_driver(&ipw_driver);
11917 	if (ret) {
11918 		IPW_ERROR("Unable to initialize PCI module\n");
11919 		return ret;
11920 	}
11921 
11922 	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11923 	if (ret) {
11924 		IPW_ERROR("Unable to create driver sysfs file\n");
11925 		pci_unregister_driver(&ipw_driver);
11926 		return ret;
11927 	}
11928 
11929 	return ret;
11930 }
11931 
11932 static void __exit ipw_exit(void)
11933 {
11934 	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11935 	pci_unregister_driver(&ipw_driver);
11936 }
11937 
11938 module_param(disable, int, 0444);
11939 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11940 
11941 module_param(associate, int, 0444);
11942 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11943 
11944 module_param(auto_create, int, 0444);
11945 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11946 
11947 module_param_named(led, led_support, int, 0444);
11948 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
11949 
11950 module_param(debug, int, 0444);
11951 MODULE_PARM_DESC(debug, "debug output mask");
11952 
11953 module_param_named(channel, default_channel, int, 0444);
11954 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11955 
11956 #ifdef CONFIG_IPW2200_PROMISCUOUS
11957 module_param(rtap_iface, int, 0444);
11958 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11959 #endif
11960 
11961 #ifdef CONFIG_IPW2200_QOS
11962 module_param(qos_enable, int, 0444);
11963 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11964 
11965 module_param(qos_burst_enable, int, 0444);
11966 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11967 
11968 module_param(qos_no_ack_mask, int, 0444);
11969 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11970 
11971 module_param(burst_duration_CCK, int, 0444);
11972 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11973 
11974 module_param(burst_duration_OFDM, int, 0444);
11975 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11976 #endif				/* CONFIG_IPW2200_QOS */
11977 
11978 #ifdef CONFIG_IPW2200_MONITOR
11979 module_param_named(mode, network_mode, int, 0444);
11980 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11981 #else
11982 module_param_named(mode, network_mode, int, 0444);
11983 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11984 #endif
11985 
11986 module_param(bt_coexist, int, 0444);
11987 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11988 
11989 module_param(hwcrypto, int, 0444);
11990 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11991 
11992 module_param(cmdlog, int, 0444);
11993 MODULE_PARM_DESC(cmdlog,
11994 		 "allocate a ring buffer for logging firmware commands");
11995 
11996 module_param(roaming, int, 0444);
11997 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11998 
11999 module_param(antenna, int, 0444);
12000 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
12001 
12002 module_exit(ipw_exit);
12003 module_init(ipw_init);
12004