xref: /titanic_52/usr/src/uts/common/io/iwp/iwp.c (revision 8de5c4f463386063e184a851437d58080c6c626c)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2009, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Intel(R) WiFi Link 6000 Driver
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwp_calibration.h"
60 #include "iwp_hw.h"
61 #include "iwp_eeprom.h"
62 #include "iwp_var.h"
63 #include <inet/wifi_ioctl.h>
64 
#ifdef DEBUG
/*
 * Debug-message categories.  Each bit enables cmn_err() output for one
 * driver subsystem; combine bits to enable several at once.
 */
#define	IWP_DEBUG_80211		(1 << 0)
#define	IWP_DEBUG_CMD		(1 << 1)
#define	IWP_DEBUG_DMA		(1 << 2)
#define	IWP_DEBUG_EEPROM	(1 << 3)
#define	IWP_DEBUG_FW		(1 << 4)
#define	IWP_DEBUG_HW		(1 << 5)
#define	IWP_DEBUG_INTR		(1 << 6)
#define	IWP_DEBUG_MRR		(1 << 7)
#define	IWP_DEBUG_PIO		(1 << 8)
#define	IWP_DEBUG_RX		(1 << 9)
#define	IWP_DEBUG_SCAN		(1 << 10)
#define	IWP_DEBUG_TX		(1 << 11)
#define	IWP_DEBUG_RATECTL	(1 << 12)
#define	IWP_DEBUG_RADIO		(1 << 13)
#define	IWP_DEBUG_RESUME	(1 << 14)
#define	IWP_DEBUG_CALIBRATION	(1 << 15)
/*
 * if want to see debug message of a given section,
 * please set this flag to one of above values
 */
uint32_t iwp_dbg_flags = 0;
/*
 * IWP_DBG takes a double-parenthesized argument list so the whole
 * call compiles away to nothing in non-DEBUG builds.
 */
#define	IWP_DBG(x) \
	iwp_dbg x
#else
#define	IWP_DBG(x)
#endif
92 
/* Opaque head of the per-instance soft state list (holds iwp_sc_t). */
static void	*iwp_soft_state_p = NULL;

/*
 * ucode will be compiled into driver image
 */
static uint8_t iwp_fw_bin [] = {
#include "fw-iw/iwp.ucode"
};
101 
/*
 * DMA attribute templates.  All of them constrain mappings to the low
 * 32-bit address space and to a single DMA segment; they differ only
 * in their alignment and burst-size requirements.
 */

/*
 * DMA attributes for a shared page (4KB-aligned)
 */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a keep warm DRAM descriptor (4KB-aligned)
 */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a ring descriptor (256-byte aligned)
 */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a cmd (4-byte aligned)
 */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a rx buffer (256-byte aligned)
 */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum  number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for text and data part in the firmware
 * (16-byte aligned; byte count capped at 0x7fffffff)
 */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
233 
/*
 * regs access attributes (device registers are little-endian)
 */
static ddi_device_acc_attr_t iwp_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptor
 * (little-endian structures, strictly ordered access)
 */
static ddi_device_acc_attr_t iwp_dma_descattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes
 * (raw buffer data: never byte-swapped)
 */
static ddi_device_acc_attr_t iwp_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
263 
264 static int	iwp_ring_init(iwp_sc_t *);
265 static void	iwp_ring_free(iwp_sc_t *);
266 static int	iwp_alloc_shared(iwp_sc_t *);
267 static void	iwp_free_shared(iwp_sc_t *);
268 static int	iwp_alloc_kw(iwp_sc_t *);
269 static void	iwp_free_kw(iwp_sc_t *);
270 static int	iwp_alloc_fw_dma(iwp_sc_t *);
271 static void	iwp_free_fw_dma(iwp_sc_t *);
272 static int	iwp_alloc_rx_ring(iwp_sc_t *);
273 static void	iwp_reset_rx_ring(iwp_sc_t *);
274 static void	iwp_free_rx_ring(iwp_sc_t *);
275 static int	iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
276     int, int);
277 static void	iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
278 static void	iwp_free_tx_ring(iwp_tx_ring_t *);
279 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
280 static void	iwp_node_free(ieee80211_node_t *);
281 static int	iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
282 static void	iwp_mac_access_enter(iwp_sc_t *);
283 static void	iwp_mac_access_exit(iwp_sc_t *);
284 static uint32_t	iwp_reg_read(iwp_sc_t *, uint32_t);
285 static void	iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
286 static int	iwp_load_init_firmware(iwp_sc_t *);
287 static int	iwp_load_run_firmware(iwp_sc_t *);
288 static void	iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
289 static void	iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
290 static uint_t   iwp_intr(caddr_t, caddr_t);
291 static int	iwp_eep_load(iwp_sc_t *);
292 static void	iwp_get_mac_from_eep(iwp_sc_t *);
293 static int	iwp_eep_sem_down(iwp_sc_t *);
294 static void	iwp_eep_sem_up(iwp_sc_t *);
295 static uint_t   iwp_rx_softintr(caddr_t, caddr_t);
296 static uint8_t	iwp_rate_to_plcp(int);
297 static int	iwp_cmd(iwp_sc_t *, int, const void *, int, int);
298 static void	iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
299 static int	iwp_hw_set_before_auth(iwp_sc_t *);
300 static int	iwp_scan(iwp_sc_t *);
301 static int	iwp_config(iwp_sc_t *);
302 static void	iwp_stop_master(iwp_sc_t *);
303 static int	iwp_power_up(iwp_sc_t *);
304 static int	iwp_preinit(iwp_sc_t *);
305 static int	iwp_init(iwp_sc_t *);
306 static void	iwp_stop(iwp_sc_t *);
307 static int	iwp_quiesce(dev_info_t *t);
308 static void	iwp_amrr_init(iwp_amrr_t *);
309 static void	iwp_amrr_timeout(iwp_sc_t *);
310 static void	iwp_amrr_ratectl(void *, ieee80211_node_t *);
311 static void	iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
312 static void	iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
313 static void	iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
314 static void	iwp_release_calib_buffer(iwp_sc_t *);
315 static int	iwp_init_common(iwp_sc_t *);
316 static uint8_t	*iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
317 static int	iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
318 static	int	iwp_alive_common(iwp_sc_t *);
319 static void	iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
320 static int	iwp_attach(dev_info_t *, ddi_attach_cmd_t);
321 static int	iwp_detach(dev_info_t *, ddi_detach_cmd_t);
322 static void	iwp_destroy_locks(iwp_sc_t *);
323 static int	iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
324 static void	iwp_thread(iwp_sc_t *);
325 static int	iwp_run_state_config(iwp_sc_t *);
326 static int	iwp_fast_recover(iwp_sc_t *);
327 static void	iwp_overwrite_ic_default(iwp_sc_t *);
328 static int	iwp_add_ap_sta(iwp_sc_t *);
329 static int	iwp_alloc_dma_mem(iwp_sc_t *, size_t,
330     ddi_dma_attr_t *, ddi_device_acc_attr_t *,
331     uint_t, iwp_dma_t *);
332 static void	iwp_free_dma_mem(iwp_dma_t *);
333 static int	iwp_eep_ver_chk(iwp_sc_t *);
334 static void	iwp_set_chip_param(iwp_sc_t *);
335 
336 /*
337  * GLD specific operations
338  */
339 static int	iwp_m_stat(void *, uint_t, uint64_t *);
340 static int	iwp_m_start(void *);
341 static void	iwp_m_stop(void *);
342 static int	iwp_m_unicst(void *, const uint8_t *);
343 static int	iwp_m_multicst(void *, boolean_t, const uint8_t *);
344 static int	iwp_m_promisc(void *, boolean_t);
345 static mblk_t	*iwp_m_tx(void *, mblk_t *);
346 static void	iwp_m_ioctl(void *, queue_t *, mblk_t *);
347 static int	iwp_m_setprop(void *arg, const char *pr_name,
348     mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
349 static int	iwp_m_getprop(void *arg, const char *pr_name,
350     mac_prop_id_t wldp_pr_num, uint_t pr_flags, uint_t wldp_length,
351     void *wldp_buf, uint_t *perm);
352 
/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 * 11b: 1, 2, 5.5 and 11 Mbit/s; 11g adds the OFDM rates up to 54 Mbit/s.
 */
static const struct ieee80211_rateset iwp_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwp_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
361 
362 /*
363  * For mfthread only
364  */
365 extern pri_t minclsyspri;
366 
367 #define	DRV_NAME_SP	"iwp"
368 
/*
 * Module Loading Data & Entry Points
 */
DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
    iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);

static struct modldrv iwp_modldrv = {
	&mod_driverops,			/* this is a device driver */
	"Intel(R) PumaPeak driver(N)",	/* module description string */
	&iwp_devops			/* driver dev_ops */
};

static struct modlinkage iwp_modlinkage = {
	MODREV_1,		/* module linkage revision */
	&iwp_modldrv,		/* one linkage structure, NULL-terminated */
	NULL
};
386 
387 int
388 _init(void)
389 {
390 	int	status;
391 
392 	status = ddi_soft_state_init(&iwp_soft_state_p,
393 	    sizeof (iwp_sc_t), 1);
394 	if (status != DDI_SUCCESS) {
395 		return (status);
396 	}
397 
398 	mac_init_ops(&iwp_devops, DRV_NAME_SP);
399 	status = mod_install(&iwp_modlinkage);
400 	if (status != DDI_SUCCESS) {
401 		mac_fini_ops(&iwp_devops);
402 		ddi_soft_state_fini(&iwp_soft_state_p);
403 	}
404 
405 	return (status);
406 }
407 
408 int
409 _fini(void)
410 {
411 	int status;
412 
413 	status = mod_remove(&iwp_modlinkage);
414 	if (DDI_SUCCESS == status) {
415 		mac_fini_ops(&iwp_devops);
416 		ddi_soft_state_fini(&iwp_soft_state_p);
417 	}
418 
419 	return (status);
420 }
421 
422 int
423 _info(struct modinfo *mip)
424 {
425 	return (mod_info(&iwp_modlinkage, mip));
426 }
427 
/*
 * Mac Call Back entries.
 * GLDv3 callback vector; the leading mask advertises which of the
 * optional callbacks (ioctl, setprop, getprop) are implemented.
 */
mac_callbacks_t	iwp_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP,	/* optional-callback mask */
	iwp_m_stat,	/* mc_getstat */
	iwp_m_start,	/* mc_start */
	iwp_m_stop,	/* mc_stop */
	iwp_m_promisc,	/* mc_setpromisc */
	iwp_m_multicst,	/* mc_multicst */
	iwp_m_unicst,	/* mc_unicst */
	iwp_m_tx,	/* mc_tx */
	iwp_m_ioctl,	/* mc_ioctl */
	NULL,		/* mc_getcapab -- not provided */
	NULL,		/* mc_open -- not provided */
	NULL,		/* mc_close -- not provided */
	iwp_m_setprop,	/* mc_setprop */
	iwp_m_getprop	/* mc_getprop */
};
447 
#ifdef DEBUG
/*
 * Emit a formatted kernel notice for debugging.  The message is
 * printed only when at least one bit of 'flags' is also set in the
 * global iwp_dbg_flags mask; otherwise this is a no-op.
 */
void
iwp_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list	ap;

	if (!(flags & iwp_dbg_flags)) {
		return;
	}

	va_start(ap, fmt);
	vcmn_err(CE_NOTE, fmt, ap);
	va_end(ap);
}
#endif	/* DEBUG */
461 
462 /*
463  * device operations
464  */
/*
 * attach(9E) entry point.
 *
 * DDI_ATTACH brings up a new instance: map PCI config and device
 * registers, verify the PCI device ID, size DMA buffers from the
 * cache line size, allocate the shared page, keep-warm page, rings
 * and firmware DMA areas, load the EEPROM, install the fixed
 * interrupt plus an RX soft interrupt, attach to the net80211
 * module, register with the GLDv3 MAC layer and start the monitor
 * thread.  The attach_failN labels unwind in strict reverse order
 * of acquisition; each label falls through to the ones below it.
 *
 * DDI_RESUME restarts the chip (only if it was running when it was
 * suspended) and clears the suspend flag.
 */
int
iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwp_sc_t		*sc;
	ieee80211com_t		*ic;
	int			instance, i;
	char			strbuf[32];
	wifi_data_t		wd = { 0 };
	mac_register_t		*macp;
	int			intr_type;
	int			intr_count;
	int			intr_actual;
	int			err = DDI_FAILURE;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		instance = ddi_get_instance(dip);
		sc = ddi_get_soft_state(iwp_soft_state_p,
		    instance);
		ASSERT(sc != NULL);

		/* restart the chip only if it was up at suspend time */
		if (sc->sc_flags & IWP_F_RUNNING) {
			(void) iwp_init(sc);
		}

		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
		    "resume\n"));
		return (DDI_SUCCESS);
	default:
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate soft state\n");
		goto attach_fail1;
	}

	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
	ASSERT(sc != NULL);

	sc->sc_dip = dip;

	/*
	 * map configure space
	 */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwp_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map config spaces regs\n");
		goto attach_fail2;
	}

	/*
	 * reject any PCI device ID this driver does not support
	 */
	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
	if ((sc->sc_dev_id != 0x422B) &&
	    (sc->sc_dev_id != 0x422C) &&
	    (sc->sc_dev_id != 0x4238) &&
	    (sc->sc_dev_id != 0x4239) &&
	    (sc->sc_dev_id != 0x008d) &&
	    (sc->sc_dev_id != 0x008e)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "Do not support this device\n");
		goto attach_fail3;
	}

	iwp_set_chip_param(sc);

	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));

	/*
	 * keep from disturbing C3 state of CPU
	 */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
	    PCI_CFG_RETRY_TIMEOUT), 0);

	/*
	 * determine the size of buffer for frame and command to ucode
	 */
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz) {
		sc->sc_clsz = 16;
	}
	/* cache line size register is in 4-byte units; convert to bytes */
	sc->sc_clsz = (sc->sc_clsz << 2);

	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map device regs\n");
		goto attach_fail3;
	}

	/*
	 * this is used to differentiate type of hardware
	 */
	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);

	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "fixed type interrupt is not supported\n");
		goto attach_fail4;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "no fixed interrupts\n");
		goto attach_fail4;
	}

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail5;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail6;
	}

	/*
	 * all driver mutexes are initialized at the interrupt priority
	 * so the interrupt handler may take them
	 */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared buffer for communication between driver and ucode.
	 */
	err = iwp_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail7;
	}

	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));

	/*
	 * Allocate keep warm page.
	 */
	err = iwp_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail8;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwp_preinit(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to initialize hardware\n");
		goto attach_fail9;
	}

	/*
	 * get hardware configurations from eeprom
	 */
	err = iwp_eep_load(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to load eeprom\n");
		goto attach_fail9;
	}

	/*
	 * calibration information from EEPROM
	 */
	sc->sc_eep_calib = (struct iwp_eep_calibration *)
	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);

	err = iwp_eep_ver_chk(sc);
	if (err != IWP_SUCCESS) {
		goto attach_fail9;
	}

	/*
	 * get MAC address of this chipset
	 */
	iwp_get_mac_from_eep(sc);


	/*
	 * initialize TX and RX ring buffers
	 */
	err = iwp_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail9;
	}

	/* firmware header sits at the start of the embedded ucode image */
	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;

	/*
	 * copy ucode to dma buffer
	 */
	err = iwp_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail10;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * 802.11 module
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype  = IEEE80211_T_OFDM;
	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state    = IEEE80211_S_INIT;
	ic->ic_maxrssi  = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/*
	 * set supported .11b and .11g rates
	 */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;

	/*
	 * set supported .11b and .11g channels (1 through 11)
	 */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
	ic->ic_xmit = iwp_send;

	/*
	 * attach to 802.11 module
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Overwrite 80211 default configurations.
	 */
	iwp_overwrite_ic_default(sc);

	/*
	 * initialize 802.11 module
	 */
	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwp_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail12;
	}

	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail13;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail14;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	/*
	 * create relation to GLD
	 */
	macp = mac_alloc(MAC_VERSION);
	if (NULL == macp) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_alloc()\n");
		goto attach_fail15;
	}

	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver		= sc;
	macp->m_dip		= dip;
	macp->m_src_addr	= ic->ic_macaddr;
	macp->m_callbacks	= &iwp_m_callbacks;
	macp->m_min_sdu		= 0;
	macp->m_max_sdu		= IEEE80211_MTU;
	macp->m_pdata		= &wd;
	macp->m_pdata_size	= sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_register()\n");
		goto attach_fail15;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI.
	 * NOTE: failure here is only warned about, not treated as fatal.
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do ddi_create_minor_node()\n");
	}

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (NULL == sc->sc_mf_thread) {
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
	}

	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);

	return (DDI_SUCCESS);

	/*
	 * error unwind: each label releases one resource and falls
	 * through to release everything acquired before it
	 */
attach_fail15:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail14:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail13:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail12:
	ieee80211_detach(ic);
attach_fail11:
	iwp_free_fw_dma(sc);
attach_fail10:
	iwp_ring_free(sc);
attach_fail9:
	iwp_free_kw(sc);
attach_fail8:
	iwp_free_shared(sc);
attach_fail7:
	iwp_destroy_locks(sc);
attach_fail6:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail5:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail4:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail3:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwp_soft_state_p, instance);
attach_fail1:
	return (DDI_FAILURE);
}
898 
/*
 * detach(9E) entry point.
 *
 * DDI_SUSPEND stops the chip (if it was running), resets the 802.11
 * state machine to INIT and marks the instance suspended.
 *
 * DDI_DETACH tears the instance down: stop the monitor thread, ask
 * the MAC layer to disable the device, stop the chip, then release
 * DMA areas, interrupts, the net80211 attachment, locks, register
 * mappings and the soft state.  Returns DDI_FAILURE if the instance
 * never finished attach, or mac_disable()'s error if the device is
 * still in use.
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t	*ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		/* stop background recovery/rate-control activity */
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread: clear its run switch, then wait for
	 * the thread to announce its exit on sc_mt_cv.
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		/* give up the wait if interrupted by a signal */
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	/* wait 0.5s before tearing resources down */
	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
999 }
1000 
1001 /*
1002  * destroy all locks
1003  */
1004 static void
1005 iwp_destroy_locks(iwp_sc_t *sc)
1006 {
1007 	cv_destroy(&sc->sc_mt_cv);
1008 	cv_destroy(&sc->sc_cmd_cv);
1009 	cv_destroy(&sc->sc_put_seg_cv);
1010 	cv_destroy(&sc->sc_ucode_cv);
1011 	mutex_destroy(&sc->sc_mt_lock);
1012 	mutex_destroy(&sc->sc_tx_lock);
1013 	mutex_destroy(&sc->sc_glock);
1014 }
1015 
1016 /*
1017  * Allocate an area of memory and a DMA handle for accessing it
1018  */
1019 static int
1020 iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
1021     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1022     uint_t dma_flags, iwp_dma_t *dma_p)
1023 {
1024 	caddr_t vaddr;
1025 	int err = DDI_FAILURE;
1026 
1027 	/*
1028 	 * Allocate handle
1029 	 */
1030 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1031 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1032 	if (err != DDI_SUCCESS) {
1033 		dma_p->dma_hdl = NULL;
1034 		return (DDI_FAILURE);
1035 	}
1036 
1037 	/*
1038 	 * Allocate memory
1039 	 */
1040 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1041 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1042 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1043 	if (err != DDI_SUCCESS) {
1044 		ddi_dma_free_handle(&dma_p->dma_hdl);
1045 		dma_p->dma_hdl = NULL;
1046 		dma_p->acc_hdl = NULL;
1047 		return (DDI_FAILURE);
1048 	}
1049 
1050 	/*
1051 	 * Bind the two together
1052 	 */
1053 	dma_p->mem_va = vaddr;
1054 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1055 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1056 	    &dma_p->cookie, &dma_p->ncookies);
1057 	if (err != DDI_DMA_MAPPED) {
1058 		ddi_dma_mem_free(&dma_p->acc_hdl);
1059 		ddi_dma_free_handle(&dma_p->dma_hdl);
1060 		dma_p->acc_hdl = NULL;
1061 		dma_p->dma_hdl = NULL;
1062 		return (DDI_FAILURE);
1063 	}
1064 
1065 	dma_p->nslots = ~0U;
1066 	dma_p->size = ~0U;
1067 	dma_p->token = ~0U;
1068 	dma_p->offset = 0;
1069 	return (DDI_SUCCESS);
1070 }
1071 
1072 /*
1073  * Free one allocated area of DMAable memory
1074  */
1075 static void
1076 iwp_free_dma_mem(iwp_dma_t *dma_p)
1077 {
1078 	if (dma_p->dma_hdl != NULL) {
1079 		if (dma_p->ncookies) {
1080 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1081 			dma_p->ncookies = 0;
1082 		}
1083 		ddi_dma_free_handle(&dma_p->dma_hdl);
1084 		dma_p->dma_hdl = NULL;
1085 	}
1086 
1087 	if (dma_p->acc_hdl != NULL) {
1088 		ddi_dma_mem_free(&dma_p->acc_hdl);
1089 		dma_p->acc_hdl = NULL;
1090 	}
1091 }
1092 
/*
 * Copy the firmware (ucode) image into DMAable buffers so the device
 * can fetch each section directly.  Returns DDI_SUCCESS or DDI_FAILURE;
 * on failure, partially allocated areas are left for the caller to
 * release — presumably via iwp_free_fw_dma(), confirm at call sites.
 */
static int
iwp_alloc_fw_dma(iwp_sc_t *sc)
{
	int err = DDI_FAILURE;
	iwp_dma_t *dma_p;
	char *t;

	/*
	 * firmware image layout:
	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
	 * The sizes of each section come from the header at sc_hdr.
	 */

	/*
	 * Validate every section size against the device's RTC limits
	 * before copying anything.
	 */
	if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_datasz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->datasz));

		goto fail;
	}

	/*
	 * copy text of runtime ucode; t walks the image starting just
	 * past the header
	 */
	t = (char *)(sc->sc_hdr + 1);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate text dma memory.\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "text[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));

	/*
	 * copy data and bak-data of runtime ucode; two identical copies
	 * are kept (sc_dma_fw_data and sc_dma_fw_data_bak)
	 */
	t += LE_32(sc->sc_hdr->textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data bakup dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data_bak;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * copy text of init ucode
	 */
	t += LE_32(sc->sc_hdr->datasz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init text dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	/*
	 * copy data of init ucode
	 */
	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	/*
	 * The boot section is not copied; just remember where it
	 * starts inside the original image.
	 */
	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	/* note: err also carries DDI_SUCCESS on the fall-through path */
	return (err);
}
1261 
1262 static void
1263 iwp_free_fw_dma(iwp_sc_t *sc)
1264 {
1265 	iwp_free_dma_mem(&sc->sc_dma_fw_text);
1266 	iwp_free_dma_mem(&sc->sc_dma_fw_data);
1267 	iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1268 	iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1269 	iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1270 }
1271 
/*
 * Allocate a shared buffer between host and NIC and publish its
 * kernel virtual address in sc_shared.
 */
static int
iwp_alloc_shared(iwp_sc_t *sc)
{
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 * (alignment is enforced by sh_dma_attr)
	 */
	err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
	    &sh_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_sh);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;

	/*
	 * dma_p exists only under DEBUG; IWP_DBG presumably compiles
	 * to nothing otherwise — confirm against the macro definition.
	 */
#ifdef	DEBUG
	dma_p = &sc->sc_dma_sh;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
	    "sh[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_shared(sc);
	return (err);
}
1309 
/*
 * Release the host/NIC shared buffer allocated by iwp_alloc_shared().
 */
static void
iwp_free_shared(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_sh);
}
1315 
/*
 * Allocate a "keep warm" page (IWP_KW_SIZE bytes) used by the device.
 */
static int
iwp_alloc_kw(iwp_sc_t *sc)
{
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 * (alignment is enforced by kw_dma_attr)
	 */
	err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
	    &kw_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_kw);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &sc->sc_dma_kw;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
	    "kw[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_kw(sc);
	return (err);
}
1351 
/*
 * Release the keep-warm page allocated by iwp_alloc_kw().
 */
static void
iwp_free_kw(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_kw);
}
1357 
/*
 * Allocate and initialize the RX ring: a descriptor ring of
 * RX_QUEUE_SIZE 32-bit entries plus one DMA data buffer per entry.
 * On failure all partial allocations are released via
 * iwp_free_rx_ring().
 */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef	DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945; i.e. the descriptor
		 * stores the buffer address shifted right by 8 bits.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef	DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* push the freshly written descriptors out to the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwp_free_rx_ring(sc);
	return (err);
}
1437 
/*
 * Disable the RX DMA channel and wait (up to ~2s) for it to go idle,
 * then rewind the software ring index.
 */
static void
iwp_reset_rx_ring(iwp_sc_t *sc)
{
	int n;

	iwp_mac_access_enter(sc);
	/* writing 0 to the channel config register stops RX DMA */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	for (n = 0; n < 2000; n++) {
		/*
		 * bit 24 of the RX status register is polled as the
		 * idle indication — presumably a CHNL0-idle flag;
		 * confirm against the FH_MEM_RSSR register layout.
		 */
		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
			break;
		}
		DELAY(1000);
	}
#ifdef DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
		    "timeout resetting Rx ring\n"));
	}
#endif
	iwp_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
1464 
1465 static void
1466 iwp_free_rx_ring(iwp_sc_t *sc)
1467 {
1468 	int i;
1469 
1470 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1471 		if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1472 			IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1473 			    DDI_DMA_SYNC_FORCPU);
1474 		}
1475 
1476 		iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1477 	}
1478 
1479 	if (sc->sc_rxq.dma_desc.dma_hdl) {
1480 		IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1481 	}
1482 
1483 	iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
1484 }
1485 
/*
 * Allocate and initialize one TX ring (queue qid): a descriptor ring,
 * a parallel array of ucode command buffers, and one DMA frame buffer
 * per slot.  "slots" becomes the ring's flow-control window.  On
 * failure everything allocated so far is released via
 * iwp_free_tx_ring().
 */
static int
iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
    int slots, int qid)
{
	iwp_tx_data_t *data;
	iwp_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwp_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;
	ring->qid = qid;
	ring->count = TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;

	/*
	 * allocate buffer for TX descriptor ring
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring desc[%d] "
		    "failed\n", qid));
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* base virtual and device addresses of the descriptor array */
	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/*
	 * allocate buffer for ucode command
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
	    &cmd_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring cmd[%d]"
		    " failed\n", qid));
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &ring->dma_cmd;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* base virtual and device addresses of the command array */
	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx frame buffers.
	 */
	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (NULL == ring->data) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "could not allocate "
		    "tx data slots\n"));
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
			    "dma alloc tx "
			    "ring buf[%d] failed\n", i));
			goto fail;
		}

		/*
		 * wire slot i to its descriptor and command entry,
		 * recording both the kernel and device addresses
		 */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h +  i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
#ifdef	DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	iwp_free_tx_ring(ring);

	return (err);
}
1609 
/*
 * Disable one TX DMA channel, wait (up to ~2ms) for it to go idle,
 * sync outstanding buffers back to the device, and rewind the
 * software ring indices.
 */
static void
iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
{
	iwp_tx_data_t *data;
	int i, n;

	iwp_mac_access_enter(sc);

	/* writing 0 to the channel config register stops TX DMA */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	for (n = 0; n < 200; n++) {
		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
			break;
		}
		DELAY(10);
	}

#ifdef	DEBUG
	if (200 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
		    "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif

	iwp_mac_access_exit(sc);

	/* by pass, if it's quiesce (DMA sync is unsafe at that point) */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];
			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
		}
	}

	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;
}
1652 
1653 static void
1654 iwp_free_tx_ring(iwp_tx_ring_t *ring)
1655 {
1656 	int i;
1657 
1658 	if (ring->dma_desc.dma_hdl != NULL) {
1659 		IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1660 	}
1661 	iwp_free_dma_mem(&ring->dma_desc);
1662 
1663 	if (ring->dma_cmd.dma_hdl != NULL) {
1664 		IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1665 	}
1666 	iwp_free_dma_mem(&ring->dma_cmd);
1667 
1668 	if (ring->data != NULL) {
1669 		for (i = 0; i < ring->count; i++) {
1670 			if (ring->data[i].dma_data.dma_hdl) {
1671 				IWP_DMA_SYNC(ring->data[i].dma_data,
1672 				    DDI_DMA_SYNC_FORDEV);
1673 			}
1674 			iwp_free_dma_mem(&ring->data[i].dma_data);
1675 		}
1676 		kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
1677 	}
1678 }
1679 
1680 /*
1681  * initialize TX and RX ring
1682  */
1683 static int
1684 iwp_ring_init(iwp_sc_t *sc)
1685 {
1686 	int i, err = DDI_FAILURE;
1687 
1688 	for (i = 0; i < IWP_NUM_QUEUES; i++) {
1689 		if (IWP_CMD_QUEUE_NUM == i) {
1690 			continue;
1691 		}
1692 
1693 		err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1694 		    i);
1695 		if (err != DDI_SUCCESS) {
1696 			goto fail;
1697 		}
1698 	}
1699 
1700 	/*
1701 	 * initialize command queue
1702 	 */
1703 	err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
1704 	    TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
1705 	if (err != DDI_SUCCESS) {
1706 		goto fail;
1707 	}
1708 
1709 	err = iwp_alloc_rx_ring(sc);
1710 	if (err != DDI_SUCCESS) {
1711 		goto fail;
1712 	}
1713 
1714 fail:
1715 	return (err);
1716 }
1717 
1718 static void
1719 iwp_ring_free(iwp_sc_t *sc)
1720 {
1721 	int i = IWP_NUM_QUEUES;
1722 
1723 	iwp_free_rx_ring(sc);
1724 	while (--i >= 0) {
1725 		iwp_free_tx_ring(&sc->sc_txq[i]);
1726 	}
1727 }
1728 
1729 /* ARGSUSED */
1730 static ieee80211_node_t *
1731 iwp_node_alloc(ieee80211com_t *ic)
1732 {
1733 	iwp_amrr_t *amrr;
1734 
1735 	amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1736 	if (NULL == amrr) {
1737 		cmn_err(CE_WARN, "iwp_node_alloc(): "
1738 		    "failed to allocate memory for amrr structure\n");
1739 		return (NULL);
1740 	}
1741 
1742 	iwp_amrr_init(amrr);
1743 
1744 	return (&amrr->in);
1745 }
1746 
1747 static void
1748 iwp_node_free(ieee80211_node_t *in)
1749 {
1750 	ieee80211com_t *ic;
1751 
1752 	if ((NULL == in) ||
1753 	    (NULL == in->in_ic)) {
1754 		cmn_err(CE_WARN, "iwp_node_free() "
1755 		    "Got a NULL point from Net80211 module\n");
1756 		return;
1757 	}
1758 	ic = in->in_ic;
1759 
1760 	if (ic->ic_node_cleanup != NULL) {
1761 		ic->ic_node_cleanup(in);
1762 	}
1763 
1764 	if (in->in_wpa_ie != NULL) {
1765 		ieee80211_free(in->in_wpa_ie);
1766 	}
1767 
1768 	if (in->in_wme_ie != NULL) {
1769 		ieee80211_free(in->in_wme_ie);
1770 	}
1771 
1772 	if (in->in_htcap_ie != NULL) {
1773 		ieee80211_free(in->in_htcap_ie);
1774 	}
1775 
1776 	kmem_free(in, sizeof (iwp_amrr_t));
1777 }
1778 
1779 
/*
 * change station's state. this function will be invoked by 80211 module
 * when need to change station's state.  Runs under sc_glock except
 * where noted; ends by chaining to the stack's saved sc_newstate.
 */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * sc_glock is dropped around the chained
			 * sc_newstate call, then retaken for iwp_scan();
			 * this path returns directly and does NOT fall
			 * through to the final sc_newstate call below.
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value:
			 * the highest basic rate not above 72 (36 Mbps)
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	/* chain to the net80211 state machine saved at attach time */
	return (sc->sc_newstate(ic, nstate, arg));
}
1959 
/*
 * exclusive access to mac begin: request MAC access and poll (up to
 * ~10ms) until the MAC clock is ready and the device is not going to
 * sleep.  Paired with iwp_mac_access_exit().
 */
static void
iwp_mac_access_enter(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait until we succeed */
	for (n = 0; n < 1000; n++) {
		if ((IWP_READ(sc, CSR_GP_CNTRL) &
		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
			break;
		}
		DELAY(10);
	}

#ifdef	DEBUG
	if (1000 == n) {
		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
		    "could not lock memory\n"));
	}
#endif
}
1991 
1992 /*
1993  * exclusive access to mac end.
1994  */
1995 static void
1996 iwp_mac_access_exit(iwp_sc_t *sc)
1997 {
1998 	uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
1999 	IWP_WRITE(sc, CSR_GP_CNTRL,
2000 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2001 }
2002 
2003 /*
2004  * this function defined here for future use.
2005  * static uint32_t
2006  * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
2007  * {
2008  * 	IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2009  * 	return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
2010  * }
2011  */
2012 
/*
 * write mac memory: latch the target address into the indirect-access
 * address register, then write the data register.
 */
static void
iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
2022 
/*
 * read mac register via the indirect peripheral-access window.
 * The (3 << 24) in the high bits presumably encodes the access
 * width/mode — confirm against the HBUS_TARG_PRPH definitions.
 */
static uint32_t
iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
}
2032 
/*
 * write mac register via the indirect peripheral-access window;
 * same (3 << 24) mode bits as iwp_reg_read().
 */
static void
iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
2042 
2043 
/*
 * steps of loading ucode:
 * load init ucode=>init alive=>calibrate=>
 * receive calibration result=>reinitialize NIC=>
 * load runtime ucode=>runtime alive=>
 * send calibration result=>running.
 *
 * This function performs the first step: it DMAs the init text and
 * init data sections into the device, waiting (up to 1s each) for the
 * interrupt handler to set IWP_F_PUT_SEG after each segment completes.
 * Caller must hold sc_glock (required by the cv_timedwait below).
 */
static int
iwp_load_init_firmware(iwp_sc_t *sc)
{
	int	err = IWP_FAIL;
	clock_t	clk;

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading init_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_data section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading init_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2119 
/*
 * Load the runtime ucode: DMA the text section and the (backup copy
 * of the) data section into the device, waiting (up to 1s each) for
 * the interrupt handler to set IWP_F_PUT_SEG after each segment.
 * Caller must hold sc_glock (required by the cv_timedwait below).
 */
static int
iwp_load_run_firmware(iwp_sc_t *sc)
{
	int	err = IWP_FAIL;
	clock_t	clk;

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load text section of runtime uCode to hardware
	 * (previous comment wrongly said "init_text")
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading run_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load run_data section of uCode to hardware.  The pristine
	 * backup copy (data_bak) is used as the source; both copies
	 * were allocated with the same size (hdr->datasz), so using
	 * fw_data's cookie size here is equivalent.
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading run_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2188 
/*
 * this function will be invoked to receive phy information
 * when a frame is received: it stashes the PHY result that follows
 * the RX descriptor so iwp_rx_mpdu_intr() can use it later, and
 * marks it valid via the flag.
 */
static void
iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{

	sc->sc_rx_phy_res.flag = 1;

	/* payload of this notification starts right after the descriptor */
	(void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
	    sizeof (iwp_rx_phy_res_t));
}
2202 
2203 /*
2204  * this function will be invoked to receive body of frame when
2205  * a frame is received.
2206  */
static void
iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
#ifdef	DEBUG
	iwp_rx_ring_t	*ring = &sc->sc_rxq;
#endif
	struct ieee80211_frame		*wh;
	struct iwp_rx_non_cfg_phy	*phyinfo;
	struct	iwp_rx_mpdu_body_size	*mpdu_size;

	mblk_t			*mp;
	int16_t			t;
	uint16_t		len, rssi, agc;
	uint32_t		temp, crc, *tail;
	uint32_t		arssi, brssi, crssi, mrssi;
	iwp_rx_phy_res_t	*stat;
	ieee80211_node_t	*in;

	/*
	 * assuming not 11n here. cope with 11n in phase-II
	 */
	mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
	/*
	 * PHY information for this frame was cached earlier by
	 * iwp_rx_phy_intr() when the REPLY_RX_PHY_CMD notification
	 * arrived.
	 */
	stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/*
	 * extract the AGC gain and the per-chain (A/B/C) RSSI values
	 * from the non-configuration PHY data words.
	 */
	phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
	agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
	arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
	brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
	crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;

	/* take the strongest chain as the frame's raw RSSI */
	mrssi = MAX(arssi, brssi);
	mrssi = MAX(mrssi, crssi);

	t = mrssi - agc - IWP_RSSI_OFFSET;
	/*
	 * convert dBm to percentage
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	/* clamp the percentage to the 1..100 range */
	if (rssi > 100) {
		rssi = 100;
	}
	if (rssi < 1) {
		rssi = 1;
	}

	/*
	 * size of frame, not include FCS
	 */
	len = LE_16(mpdu_size->byte_count);
	/*
	 * the 32-bit word following the frame body carries the rx
	 * status bits (CRC / overrun), checked below.
	 */
	tail = (uint32_t *)((uint8_t *)(desc + 1) +
	    sizeof (struct iwp_rx_mpdu_body_size) + len);
	bcopy(tail, &crc, 4);

	IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
	    "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(crc)));

	/* sanity-check the length against the rx DMA buffer size */
	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(crc) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx crc error tail: %x\n",
		    LE_32(crc)));
		sc->sc_rx_err++;
		return;
	}

	/* the 802.11 header starts right after the size descriptor */
	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));

	/* snoop the association ID out of association response frames */
	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_RX) {
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
	}
#endif

	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/*
		 * send the frame to the 802.11 layer
		 */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		/* out of mblks; drop the frame and count it */
		sc->sc_rx_nobuf++;
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "alloc rx buf failed\n"));
	}

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);
}
2334 
2335 /*
2336  * process correlative affairs after a frame is sent.
2337  */
static void
iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
	iwp_amrr_t *amrr;

	/* nothing to account against without a BSS node */
	if (NULL == ic->ic_bss) {
		return;
	}

	/* rate-control (AMRR) state is embedded in the BSS node */
	amrr = (iwp_amrr_t *)ic->ic_bss;

	amrr->txcnt++;
	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
	    "tx: %d cnt\n", amrr->txcnt));

	/* account firmware retransmissions for rate scaling and stats */
	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
		    "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	/* a tx completed, so disarm the tx watchdog timer */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	mutex_enter(&sc->sc_tx_lock);

	ring->queued--;
	if (ring->queued < 0) {
		ring->queued = 0;
	}

	/*
	 * if GLD was throttled because the ring was full, resume tx
	 * once the ring drains to 1/8th of its capacity; sc_tx_lock
	 * is dropped across mac_tx_update() to avoid re-entry deadlock.
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}

	mutex_exit(&sc->sc_tx_lock);
}
2384 
2385 /*
2386  * inform a given command has been executed
2387  */
2388 static void
2389 iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2390 {
2391 	if ((desc->hdr.qid & 7) != 4) {
2392 		return;
2393 	}
2394 
2395 	if (sc->sc_cmd_accum > 0) {
2396 		sc->sc_cmd_accum--;
2397 		return;
2398 	}
2399 
2400 	mutex_enter(&sc->sc_glock);
2401 
2402 	sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2403 
2404 	cv_signal(&sc->sc_cmd_cv);
2405 
2406 	mutex_exit(&sc->sc_glock);
2407 
2408 	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
2409 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2410 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2411 	    desc->hdr.type));
2412 }
2413 
2414 /*
2415  * this function will be invoked when alive notification occur.
2416  */
2417 static void
2418 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2419 {
2420 	uint32_t rv;
2421 	struct iwp_calib_cfg_cmd cmd;
2422 	struct iwp_alive_resp *ar =
2423 	    (struct iwp_alive_resp *)(desc + 1);
2424 	struct iwp_calib_results *res_p = &sc->sc_calib_results;
2425 
2426 	/*
2427 	 * the microcontroller is ready
2428 	 */
2429 	IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2430 	    "microcode alive notification minor: %x major: %x type: "
2431 	    "%x subtype: %x\n",
2432 	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2433 
2434 #ifdef	DEBUG
2435 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2436 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2437 		    "microcontroller initialization failed\n"));
2438 	}
2439 #endif
2440 
2441 	/*
2442 	 * determine if init alive or runtime alive.
2443 	 */
2444 	if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2445 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2446 		    "initialization alive received.\n"));
2447 
2448 		(void) memcpy(&sc->sc_card_alive_init, ar,
2449 		    sizeof (struct iwp_init_alive_resp));
2450 
2451 		/*
2452 		 * necessary configuration to NIC
2453 		 */
2454 		mutex_enter(&sc->sc_glock);
2455 
2456 		rv = iwp_alive_common(sc);
2457 		if (rv != IWP_SUCCESS) {
2458 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2459 			    "common alive process failed in init alive.\n");
2460 			mutex_exit(&sc->sc_glock);
2461 			return;
2462 		}
2463 
2464 		(void) memset(&cmd, 0, sizeof (cmd));
2465 
2466 		cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
2467 		cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
2468 		cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
2469 		cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;
2470 
2471 		/*
2472 		 * require ucode execute calibration
2473 		 */
2474 		rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2475 		if (rv != IWP_SUCCESS) {
2476 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2477 			    "failed to send calibration configure command.\n");
2478 			mutex_exit(&sc->sc_glock);
2479 			return;
2480 		}
2481 
2482 		mutex_exit(&sc->sc_glock);
2483 
2484 	} else {	/* runtime alive */
2485 
2486 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2487 		    "runtime alive received.\n"));
2488 
2489 		(void) memcpy(&sc->sc_card_alive_run, ar,
2490 		    sizeof (struct iwp_alive_resp));
2491 
2492 		mutex_enter(&sc->sc_glock);
2493 
2494 		/*
2495 		 * necessary configuration to NIC
2496 		 */
2497 		rv = iwp_alive_common(sc);
2498 		if (rv != IWP_SUCCESS) {
2499 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2500 			    "common alive process failed in run alive.\n");
2501 			mutex_exit(&sc->sc_glock);
2502 			return;
2503 		}
2504 
2505 		/*
2506 		 * send the result of local oscilator calibration to uCode.
2507 		 */
2508 		if (res_p->lo_res != NULL) {
2509 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2510 			    res_p->lo_res, res_p->lo_res_len, 1);
2511 			if (rv != IWP_SUCCESS) {
2512 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2513 				    "failed to send local"
2514 				    "oscilator calibration command.\n");
2515 				mutex_exit(&sc->sc_glock);
2516 				return;
2517 			}
2518 
2519 			DELAY(1000);
2520 		}
2521 
2522 		/*
2523 		 * send the result of TX IQ calibration to uCode.
2524 		 */
2525 		if (res_p->tx_iq_res != NULL) {
2526 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2527 			    res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2528 			if (rv != IWP_SUCCESS) {
2529 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2530 				    "failed to send TX IQ"
2531 				    "calibration command.\n");
2532 				mutex_exit(&sc->sc_glock);
2533 				return;
2534 			}
2535 
2536 			DELAY(1000);
2537 		}
2538 
2539 		/*
2540 		 * send the result of TX IQ perd calibration to uCode.
2541 		 */
2542 		if (res_p->tx_iq_perd_res != NULL) {
2543 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2544 			    res_p->tx_iq_perd_res,
2545 			    res_p->tx_iq_perd_res_len, 1);
2546 			if (rv != IWP_SUCCESS) {
2547 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2548 				    "failed to send TX IQ perd"
2549 				    "calibration command.\n");
2550 				mutex_exit(&sc->sc_glock);
2551 				return;
2552 			}
2553 
2554 			DELAY(1000);
2555 		}
2556 
2557 		/*
2558 		 * send the result of Base Band calibration to uCode.
2559 		 */
2560 		if (res_p->base_band_res != NULL) {
2561 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2562 			    res_p->base_band_res,
2563 			    res_p->base_band_res_len, 1);
2564 			if (rv != IWP_SUCCESS) {
2565 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2566 				    "failed to send Base Band"
2567 				    "calibration command.\n");
2568 				mutex_exit(&sc->sc_glock);
2569 				return;
2570 			}
2571 
2572 			DELAY(1000);
2573 		}
2574 
2575 		atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2576 		cv_signal(&sc->sc_ucode_cv);
2577 
2578 		mutex_exit(&sc->sc_glock);
2579 	}
2580 
2581 }
2582 
2583 /*
2584  * deal with receiving frames, command response
2585  * and all notifications from ucode.
2586  */
2587 /* ARGSUSED */
static uint_t
iwp_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	iwp_rx_desc_t *desc;
	iwp_rx_data_t *data;
	uint32_t index;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 */
	index = (sc->sc_shared->val0) & 0xfff;

	/* walk every descriptor from our cursor up to the firmware's */
	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;

		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
		    "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * a command other than a tx need to be replied
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_SCAN_CMD) &&
		    (desc->hdr.type != REPLY_TX)) {
			iwp_cmd_intr(sc, desc);
		}

		/* dispatch on the notification type from firmware */
		switch (desc->hdr.type) {
		case REPLY_RX_PHY_CMD:
			iwp_rx_phy_intr(sc, desc);
			break;

		case REPLY_RX_MPDU_CMD:
			iwp_rx_mpdu_intr(sc, desc);
			break;

		case REPLY_TX:
			iwp_tx_intr(sc, desc);
			break;

		case REPLY_ALIVE:
			iwp_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
			    "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwp_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
				    "radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				atomic_or_32(&sc->sc_flags,
				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
			}

			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwp_start_scan_t *scan =
			    (iwp_start_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel currently being scanned */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
#ifdef	DEBUG
			iwp_stop_scan_t *scan =
			    (iwp_stop_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));
#endif

			/* signal scan completion to the driver thread */
			sc->sc_scan_pending++;
			break;
		}

		case STATISTICS_NOTIFICATION:
		{
			/*
			 * handle statistics notification
			 */
			break;
		}

		case CALIBRATION_RES_NOTIFICATION:
			/* save init-firmware calibration data for later */
			iwp_save_calib_result(sc, desc);
			break;

		case CALIBRATION_COMPLETE_NOTIFICATION:
			/* init firmware finished calibrating; wake waiter */
			mutex_enter(&sc->sc_glock);
			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
			cv_signal(&sc->sc_ucode_cv);
			mutex_exit(&sc->sc_glock);
			break;

		case MISSED_BEACONS_NOTIFICATION:
		{
			struct iwp_beacon_missed *miss =
			    (struct iwp_beacon_missed *)(desc + 1);

			/* drop the association after too many lost beacons */
			if ((ic->ic_state == IEEE80211_S_RUN) &&
			    (LE_32(miss->consecutive) > 50)) {
				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
				    "beacon missed %d/%d\n",
				    LE_32(miss->consecutive),
				    LE_32(miss->total));
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_INIT, -1);
			}
			break;
		}
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what received in rx queue and tell the information
	 * to the firmware.
	 */
	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2754 
2755 /*
2756  * the handle of interrupt
2757  */
2758 /* ARGSUSED */
static uint_t
iwp_intr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	uint32_t r, rfh;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;

	/*
	 * read the pending interrupt causes; 0 means not ours,
	 * all-ones presumably means the device is gone - TODO confirm
	 */
	r = IWP_READ(sc, CSR_INT);
	if (0 == r || 0xffffffff == r) {
		return (DDI_INTR_UNCLAIMED);
	}

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "interrupt reg %x\n", r));

	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "FH interrupt reg %x\n", rfh));

	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);

	/*
	 * ack interrupts
	 */
	IWP_WRITE(sc, CSR_INT, r);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "fatal firmware error\n"));
		/* shut the hardware down; iwp_thread() will recover it */
		iwp_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* notify upper layer */
		if (!IWP_CHK_FAST_RECOVER(sc)) {
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}

		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		return (DDI_INTR_CLAIMED);
	}

	/* RF-kill switch state change; log when the radio comes back on */
	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & (1 << 27)) {
			cmn_err(CE_NOTE, "RF switch: radio on\n");
		}
	}

	/*
	 * rx traffic is handed off to the soft interrupt; note that
	 * interrupts are left disabled here - iwp_rx_softintr()
	 * re-enables them when it finishes.
	 */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
		return (DDI_INTR_CLAIMED);
	}

	/* a firmware-load DMA segment completed; wake the loader */
	if (r & BIT_INT_FH_TX) {
		mutex_enter(&sc->sc_glock);
		atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
		cv_signal(&sc->sc_put_seg_cv);
		mutex_exit(&sc->sc_glock);
	}

#ifdef	DEBUG
	if (r & BIT_INT_ALIVE)	{
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "firmware initialized.\n"));
	}
#endif

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2843 
/*
 * map an 802.11 rate (in 500kbps units) to the PLCP signal value
 * used by the firmware; returns 0 for an unsupported rate.
 */
static uint8_t
iwp_rate_to_plcp(int rate)
{
	static const struct {
		int	r_rate;
		uint8_t	r_plcp;
	} plcp_tbl[] = {
		/*
		 * CCK rates
		 */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },

		/*
		 * OFDM rates
		 */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 }
	};
	int i;

	for (i = 0; i < (int)(sizeof (plcp_tbl) / sizeof (plcp_tbl[0]));
	    i++) {
		if (plcp_tbl[i].r_rate == rate) {
			return (plcp_tbl[i].r_plcp);
		}
	}

	return (0);
}
2911 
2912 /*
2913  * invoked by GLD send frames
2914  */
2915 static mblk_t *
2916 iwp_m_tx(void *arg, mblk_t *mp)
2917 {
2918 	iwp_sc_t	*sc;
2919 	ieee80211com_t	*ic;
2920 	mblk_t		*next;
2921 
2922 	if (NULL == arg) {
2923 		return (NULL);
2924 	}
2925 	sc = (iwp_sc_t *)arg;
2926 	ic = &sc->sc_ic;
2927 
2928 	if (sc->sc_flags & IWP_F_SUSPEND) {
2929 		freemsgchain(mp);
2930 		return (NULL);
2931 	}
2932 
2933 	if (ic->ic_state != IEEE80211_S_RUN) {
2934 		freemsgchain(mp);
2935 		return (NULL);
2936 	}
2937 
2938 	if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2939 	    IWP_CHK_FAST_RECOVER(sc)) {
2940 		IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2941 		    "hold queue\n"));
2942 		return (mp);
2943 	}
2944 
2945 
2946 	while (mp != NULL) {
2947 		next = mp->b_next;
2948 		mp->b_next = NULL;
2949 		if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2950 			mp->b_next = next;
2951 			break;
2952 		}
2953 		mp = next;
2954 	}
2955 
2956 	return (mp);
2957 }
2958 
2959 /*
2960  * send frames
2961  */
static int
iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	iwp_sc_t *sc;
	iwp_tx_ring_t *ring;
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_tx_data_t *desc_data;
	iwp_cmd_t *cmd;
	iwp_tx_cmd_t *tx;
	ieee80211_node_t *in;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	mblk_t *m, *m0;
	int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
	uint16_t masks = 0;
	uint32_t rate, s_id = 0;

	if (NULL == ic) {
		return (IWP_FAIL);
	}
	sc = (iwp_sc_t *)ic;

	/*
	 * while suspended: free non-data frames; data frames are left
	 * alive so that GLD can retry them (IWP_FAIL is returned)
	 */
	if (sc->sc_flags & IWP_F_SUSPEND) {
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		err = IWP_FAIL;
		goto exit;
	}

	/* reserve a command slot in tx queue 0 */
	mutex_enter(&sc->sc_tx_lock);
	ring = &sc->sc_txq[0];
	data = &ring->data[ring->cur];
	cmd = data->cmd;
	bzero(cmd, sizeof (*cmd));

	ring->cur = (ring->cur + 1) % ring->count;

	/*
	 * Need reschedule TX if TX buffer is full.
	 */
	if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
		IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
		"no txbuf\n"));

		sc->sc_need_reschedule = 1;
		mutex_exit(&sc->sc_tx_lock);

		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		sc->sc_tx_nobuf++;
		err = IWP_FAIL;
		goto exit;
	}

	ring->queued++;

	mutex_exit(&sc->sc_tx_lock);

	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);

	/*
	 * pull the (possibly fragmented) message into one flat
	 * buffer; extra room for encapsulation/crypto expansion
	 */
	m = allocb(msgdsize(mp) + 32, BPRI_MED);
	if (NULL == m) { /* can not alloc buf, drop this package */
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to allocate msgbuf\n");
		freemsg(mp);

		/* give the reserved slot back and restart GLD if needed */
		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		/* IWP_SUCCESS: the frame is consumed, do not retry it */
		err = IWP_SUCCESS;
		goto exit;
	}

	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
		mblen = MBLKL(m0);
		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
		off += mblen;
	}

	m->b_wptr += off;

	wh = (struct ieee80211_frame *)m->b_rptr;

	/*
	 * determine send which AP or station in IBSS
	 */
	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (NULL == in) {
		cmn_err(CE_WARN, "iwp_send(): "
		    "failed to find tx node\n");
		freemsg(mp);
		freemsg(m);
		sc->sc_tx_err++;

		/* give the reserved slot back and restart GLD if needed */
		mutex_enter(&sc->sc_tx_lock);
		ring->queued--;
		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
			sc->sc_need_reschedule = 0;
			mutex_exit(&sc->sc_tx_lock);
			mac_tx_update(ic->ic_mach);
			mutex_enter(&sc->sc_tx_lock);
		}
		mutex_exit(&sc->sc_tx_lock);

		err = IWP_SUCCESS;
		goto exit;
	}

	/*
	 * Net80211 module encapsulate outbound data frames.
	 * Add some fields of 80211 frame.
	 */
	if ((type & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_DATA) {
		(void) ieee80211_encap(ic, m, in);
	}

	freemsg(mp);

	/* build the firmware tx command in the reserved slot */
	cmd->hdr.type = REPLY_TX;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;

	tx = (iwp_tx_cmd_t *)cmd->data;
	tx->tx_flags = 0;

	/* no ACK expected for multicast/broadcast frames */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
	} else {
		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
	}

	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, m);
		if (NULL == k) {
			freemsg(m);
			sc->sc_tx_err++;

			/* undo the slot reservation, as above */
			mutex_enter(&sc->sc_tx_lock);
			ring->queued--;
			if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
				sc->sc_need_reschedule = 0;
				mutex_exit(&sc->sc_tx_lock);
				mac_tx_update(ic->ic_mach);
				mutex_enter(&sc->sc_tx_lock);
			}
			mutex_exit(&sc->sc_tx_lock);

			err = IWP_SUCCESS;
			goto exit;
		}

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)m->b_rptr;
	}

	len = msgdsize(m);

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_TX) {
		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
	}
#endif

	tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
	tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;

	/*
	 * specific TX parameters for management frames
	 */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/*
		 * mgmt frames are sent at 1M
		 */
		if ((in->in_rates.ir_rates[0] &
		    IEEE80211_RATE_VAL) != 0) {
			rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
		} else {
			rate = 2;
		}

		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);

		/*
		 * tell h/w to set timestamp in probe responses
		 */
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);

			tx->data_retry_limit = 3;
			if (tx->data_retry_limit < tx->rts_retry_limit) {
				tx->rts_retry_limit = tx->data_retry_limit;
			}
		}

		/* (re)assoc requests get a slightly longer frame timeout */
		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
			tx->timeout.pm_frame_timeout = LE_16(3);
		} else {
			tx->timeout.pm_frame_timeout = LE_16(2);
		}

	} else {
		/*
		 * do it here for the software way rate scaling.
		 * later for rate scaling in hardware.
		 *
		 * now the txrate is determined in tx cmd flags, set to the
		 * max value 54M for 11g and 11M for 11b originally.
		 */
		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
			rate = ic->ic_fixed_rate;
		} else {
			if ((in->in_rates.ir_rates[in->in_txrate] &
			    IEEE80211_RATE_VAL) != 0) {
				rate = in->in_rates.
				    ir_rates[in->in_txrate] &
				    IEEE80211_RATE_VAL;
			}
		}

		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);

		tx->timeout.pm_frame_timeout = 0;
	}

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "tx rate[%d of %d] = %x",
	    in->in_txrate, in->in_rates.ir_nrates, rate));

	/* flag the frame if the 802.11 header needs pad to align */
	len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
	if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
		tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
	}

	/*
	 * retrieve destination node's id
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->sta_id = IWP_BROADCAST_ID;
	} else {
		tx->sta_id = IWP_AP_ID;
	}

	/* rates 1/2/5.5/11 Mbps are CCK modulated */
	if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
		masks |= RATE_MCS_CCK_MSK;
	}

	masks |= RATE_MCS_ANT_B_MSK;
	tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "tx flag = %x",
	    tx->tx_flags));

	tx->stop_time.life_time  = LE_32(0xffffffff);

	tx->len = LE_16(len);

	tx->dram_lsb_ptr =
	    LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
	tx->dram_msb_ptr = 0;
	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	/* 802.11 header travels with the command; body goes in the
	 * data DMA buffer */
	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
	m->b_rptr += hdrlen;
	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);

	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "sending data: qid=%d idx=%d len=%d",
	    ring->qid, ring->cur, len));

	/*
	 * first segment includes the tx cmd plus the 802.11 header,
	 * the second includes the remaining of the 802.11 frame.
	 */
	mutex_enter(&sc->sc_tx_lock);

	cmd->hdr.idx = ring->desc_cur;

	desc_data = &ring->data[ring->desc_cur];
	desc = desc_data->desc;
	bzero(desc, sizeof (*desc));
	desc->val0 = 2 << 24;
	desc->pa[0].tb1_addr = data->paddr_cmd;
	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
	desc->pa[0].val2 =
	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
	    ((len - hdrlen) << 20);
	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
	    "phy addr1 = 0x%x phy addr2 = 0x%x "
	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));

	/*
	 * kick ring
	 */
	s_id = tx->sta_id;

	/* update the per-queue byte-count table the firmware reads */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->desc_cur].val =
	    (8 + len) | (s_id << 12);
	if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
		    (8 + len) | (s_id << 12);
	}

	IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	/* advance the write pointer so the firmware picks up the frame */
	ring->desc_cur = (ring->desc_cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);

	mutex_exit(&sc->sc_tx_lock);
	freemsg(m);

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);

	ic->ic_stats.is_tx_bytes += len;
	ic->ic_stats.is_tx_frags++;

	/* arm the tx watchdog; iwp_tx_intr() disarms it on completion */
	mutex_enter(&sc->sc_mt_lock);
	if (0 == sc->sc_tx_timer) {
		sc->sc_tx_timer = 4;
	}
	mutex_exit(&sc->sc_mt_lock);

exit:
	return (err);
}
3315 
3316 /*
3317  * invoked by GLD to deal with IOCTL affaires
3318  */
3319 static void
3320 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3321 {
3322 	iwp_sc_t	*sc;
3323 	ieee80211com_t	*ic;
3324 	int		err = EINVAL;
3325 
3326 	if (NULL == arg) {
3327 		return;
3328 	}
3329 	sc = (iwp_sc_t *)arg;
3330 	ic = &sc->sc_ic;
3331 
3332 	err = ieee80211_ioctl(ic, wq, mp);
3333 	if (ENETRESET == err) {
3334 		/*
3335 		 * This is special for the hidden AP connection.
3336 		 * In any case, we should make sure only one 'scan'
3337 		 * in the driver for a 'connect' CLI command. So
3338 		 * when connecting to a hidden AP, the scan is just
3339 		 * sent out to the air when we know the desired
3340 		 * essid of the AP we want to connect.
3341 		 */
3342 		if (ic->ic_des_esslen) {
3343 			if (sc->sc_flags & IWP_F_RUNNING) {
3344 				iwp_m_stop(sc);
3345 				(void) iwp_m_start(sc);
3346 				(void) ieee80211_new_state(ic,
3347 				    IEEE80211_S_SCAN, -1);
3348 			}
3349 		}
3350 	}
3351 }
3352 
3353 /*
 * Call back functions for get/set property
3355  */
3356 static int
3357 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3358     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
3359 {
3360 	iwp_sc_t	*sc;
3361 	int		err = EINVAL;
3362 
3363 	if (NULL == arg) {
3364 		return (EINVAL);
3365 	}
3366 	sc = (iwp_sc_t *)arg;
3367 
3368 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3369 	    pr_flags, wldp_length, wldp_buf, perm);
3370 
3371 	return (err);
3372 }
3373 
3374 static int
3375 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3376     uint_t wldp_length, const void *wldp_buf)
3377 {
3378 	iwp_sc_t		*sc;
3379 	ieee80211com_t		*ic;
3380 	int			err = EINVAL;
3381 
3382 	if (NULL == arg) {
3383 		return (EINVAL);
3384 	}
3385 	sc = (iwp_sc_t *)arg;
3386 	ic = &sc->sc_ic;
3387 
3388 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3389 	    wldp_buf);
3390 
3391 	if (err == ENETRESET) {
3392 		if (ic->ic_des_esslen) {
3393 			if (sc->sc_flags & IWP_F_RUNNING) {
3394 				iwp_m_stop(sc);
3395 				(void) iwp_m_start(sc);
3396 				(void) ieee80211_new_state(ic,
3397 				    IEEE80211_S_SCAN, -1);
3398 			}
3399 		}
3400 		err = 0;
3401 	}
3402 	return (err);
3403 }
3404 
3405 /*
3406  * invoked by GLD supply statistics NIC and driver
3407  */
static int
iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iwp_sc_t	*sc;
	ieee80211com_t	*ic;
	ieee80211_node_t *in;

	if (NULL == arg) {
		return (EINVAL);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	mutex_enter(&sc->sc_glock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/*
		 * rates are in 500kbps units, hence the /2 * 1000000
		 * conversion to bits per second
		 */
		in = ic->ic_bss;
		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
		    IEEE80211_RATE(in->in_txrate) :
		    ic->ic_fixed_rate) / 2 * 1000000;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_tx_nobuf;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_rx_nobuf;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_rx_err;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_tx_err;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_tx_retries;
		break;
	/* stats the driver does not track itself fall through to net80211 */
	case WIFI_STAT_FCS_ERRORS:
	case WIFI_STAT_WEP_ERRORS:
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		mutex_exit(&sc->sc_glock);
		return (ieee80211_stat(ic, stat, val));
	default:
		mutex_exit(&sc->sc_glock);
		return (ENOTSUP);
	}

	mutex_exit(&sc->sc_glock);

	return (IWP_SUCCESS);

}
3480 
3481 /*
3482  * invoked by GLD to start or open NIC
3483  */
3484 static int
3485 iwp_m_start(void *arg)
3486 {
3487 	iwp_sc_t *sc;
3488 	ieee80211com_t	*ic;
3489 	int err = IWP_FAIL;
3490 
3491 	if (NULL == arg) {
3492 		return (EINVAL);
3493 	}
3494 	sc = (iwp_sc_t *)arg;
3495 	ic = &sc->sc_ic;
3496 
3497 	err = iwp_init(sc);
3498 	if (err != IWP_SUCCESS) {
3499 		/*
3500 		 * The hw init err(eg. RF is OFF). Return Success to make
3501 		 * the 'plumb' succeed. The iwp_thread() tries to re-init
3502 		 * background.
3503 		 */
3504 		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
3505 		return (IWP_SUCCESS);
3506 	}
3507 
3508 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3509 
3510 	atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3511 
3512 	return (IWP_SUCCESS);
3513 }
3514 
3515 /*
3516  * invoked by GLD to stop or down NIC
3517  */
3518 static void
3519 iwp_m_stop(void *arg)
3520 {
3521 	iwp_sc_t *sc;
3522 	ieee80211com_t	*ic;
3523 
3524 	if (NULL == arg) {
3525 		return;
3526 	}
3527 	sc = (iwp_sc_t *)arg;
3528 	ic = &sc->sc_ic;
3529 
3530 	iwp_stop(sc);
3531 
3532 	/*
3533 	 * release buffer for calibration
3534 	 */
3535 	iwp_release_calib_buffer(sc);
3536 
3537 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3538 
3539 	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
3540 	atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
3541 
3542 	atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
3543 	atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
3544 }
3545 
3546 /*
3547  * invoked by GLD to configure NIC
3548  */
3549 static int
3550 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3551 {
3552 	iwp_sc_t *sc;
3553 	ieee80211com_t	*ic;
3554 	int err = IWP_SUCCESS;
3555 
3556 	if (NULL == arg) {
3557 		return (EINVAL);
3558 	}
3559 	sc = (iwp_sc_t *)arg;
3560 	ic = &sc->sc_ic;
3561 
3562 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3563 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3564 		mutex_enter(&sc->sc_glock);
3565 		err = iwp_config(sc);
3566 		mutex_exit(&sc->sc_glock);
3567 		if (err != IWP_SUCCESS) {
3568 			cmn_err(CE_WARN, "iwp_m_unicst(): "
3569 			    "failed to configure device\n");
3570 			goto fail;
3571 		}
3572 	}
3573 
3574 	return (err);
3575 
3576 fail:
3577 	return (err);
3578 }
3579 
/* ARGSUSED */
static int
iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
{
	/*
	 * No hardware multicast filtering is performed; the request
	 * is accepted unconditionally.
	 */
	return (IWP_SUCCESS);
}
3586 
/* ARGSUSED */
static int
iwp_m_promisc(void *arg, boolean_t on)
{
	/*
	 * Promiscuous mode toggling is not implemented; the request
	 * is accepted unconditionally.
	 */
	return (IWP_SUCCESS);
}
3593 
3594 /*
3595  * kernel thread to deal with exceptional situation
3596  */
3597 static void
3598 iwp_thread(iwp_sc_t *sc)
3599 {
3600 	ieee80211com_t	*ic = &sc->sc_ic;
3601 	clock_t clk;
3602 	int err, n = 0, timeout = 0;
3603 	uint32_t tmp;
3604 #ifdef	DEBUG
3605 	int times = 0;
3606 #endif
3607 
3608 	while (sc->sc_mf_thread_switch) {
3609 		tmp = IWP_READ(sc, CSR_GP_CNTRL);
3610 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3611 			atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
3612 		} else {
3613 			atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
3614 		}
3615 
3616 		/*
3617 		 * If  in SUSPEND or the RF is OFF, do nothing.
3618 		 */
3619 		if (sc->sc_flags & IWP_F_RADIO_OFF) {
3620 			delay(drv_usectohz(100000));
3621 			continue;
3622 		}
3623 
3624 		/*
3625 		 * recovery fatal error
3626 		 */
3627 		if (ic->ic_mach &&
3628 		    (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {
3629 
3630 			IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3631 			    "try to recover fatal hw error: %d\n", times++));
3632 
3633 			iwp_stop(sc);
3634 
3635 			if (IWP_CHK_FAST_RECOVER(sc)) {
3636 				/* save runtime configuration */
3637 				bcopy(&sc->sc_config, &sc->sc_config_save,
3638 				    sizeof (sc->sc_config));
3639 			} else {
3640 				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3641 				delay(drv_usectohz(2000000 + n*500000));
3642 			}
3643 
3644 			err = iwp_init(sc);
3645 			if (err != IWP_SUCCESS) {
3646 				n++;
3647 				if (n < 20) {
3648 					continue;
3649 				}
3650 			}
3651 
3652 			n = 0;
3653 			if (!err) {
3654 				atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3655 			}
3656 
3657 
3658 			if (!IWP_CHK_FAST_RECOVER(sc) ||
3659 			    iwp_fast_recover(sc) != IWP_SUCCESS) {
3660 				atomic_and_32(&sc->sc_flags,
3661 				    ~IWP_F_HW_ERR_RECOVER);
3662 
3663 				delay(drv_usectohz(2000000));
3664 				if (sc->sc_ostate != IEEE80211_S_INIT) {
3665 					ieee80211_new_state(ic,
3666 					    IEEE80211_S_SCAN, 0);
3667 				}
3668 			}
3669 		}
3670 
3671 		if (ic->ic_mach &&
3672 		    (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
3673 			IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
3674 			    "wait for probe response\n"));
3675 
3676 			sc->sc_scan_pending--;
3677 			delay(drv_usectohz(200000));
3678 			ieee80211_next_scan(ic);
3679 		}
3680 
3681 		/*
3682 		 * rate ctl
3683 		 */
3684 		if (ic->ic_mach &&
3685 		    (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
3686 			clk = ddi_get_lbolt();
3687 			if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3688 				iwp_amrr_timeout(sc);
3689 			}
3690 		}
3691 
3692 		delay(drv_usectohz(100000));
3693 
3694 		mutex_enter(&sc->sc_mt_lock);
3695 		if (sc->sc_tx_timer) {
3696 			timeout++;
3697 			if (10 == timeout) {
3698 				sc->sc_tx_timer--;
3699 				if (0 == sc->sc_tx_timer) {
3700 					atomic_or_32(&sc->sc_flags,
3701 					    IWP_F_HW_ERR_RECOVER);
3702 					sc->sc_ostate = IEEE80211_S_RUN;
3703 					IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3704 					    "try to recover from "
3705 					    "send fail\n"));
3706 				}
3707 				timeout = 0;
3708 			}
3709 		}
3710 		mutex_exit(&sc->sc_mt_lock);
3711 	}
3712 
3713 	mutex_enter(&sc->sc_mt_lock);
3714 	sc->sc_mf_thread = NULL;
3715 	cv_signal(&sc->sc_mt_cv);
3716 	mutex_exit(&sc->sc_mt_lock);
3717 }
3718 
3719 
3720 /*
3721  * Send a command to the ucode.
3722  */
3723 static int
3724 iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
3725 {
3726 	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
3727 	iwp_tx_desc_t *desc;
3728 	iwp_cmd_t *cmd;
3729 
3730 	ASSERT(size <= sizeof (cmd->data));
3731 	ASSERT(mutex_owned(&sc->sc_glock));
3732 
3733 	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
3734 	    "code[%d]", code));
3735 	desc = ring->data[ring->cur].desc;
3736 	cmd = ring->data[ring->cur].cmd;
3737 
3738 	cmd->hdr.type = (uint8_t)code;
3739 	cmd->hdr.flags = 0;
3740 	cmd->hdr.qid = ring->qid;
3741 	cmd->hdr.idx = ring->cur;
3742 	(void) memcpy(cmd->data, buf, size);
3743 	(void) memset(desc, 0, sizeof (*desc));
3744 
3745 	desc->val0 = 1 << 24;
3746 	desc->pa[0].tb1_addr =
3747 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3748 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3749 
3750 	if (async) {
3751 		sc->sc_cmd_accum++;
3752 	}
3753 
3754 	/*
3755 	 * kick cmd ring XXX
3756 	 */
3757 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3758 	    tfd_offset[ring->cur].val = 8;
3759 	if (ring->cur < IWP_MAX_WIN_SIZE) {
3760 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3761 		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
3762 	}
3763 	ring->cur = (ring->cur + 1) % ring->count;
3764 	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3765 
3766 	if (async) {
3767 		return (IWP_SUCCESS);
3768 	} else {
3769 		clock_t clk;
3770 
3771 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3772 		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
3773 			if (cv_timedwait(&sc->sc_cmd_cv,
3774 			    &sc->sc_glock, clk) < 0) {
3775 				break;
3776 			}
3777 		}
3778 
3779 		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
3780 			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3781 			return (IWP_SUCCESS);
3782 		} else {
3783 			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3784 			return (IWP_FAIL);
3785 		}
3786 	}
3787 }
3788 
3789 /*
3790  * require ucode seting led of NIC
3791  */
3792 static void
3793 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3794 {
3795 	iwp_led_cmd_t led;
3796 
3797 	led.interval = LE_32(100000);	/* unit: 100ms */
3798 	led.id = id;
3799 	led.off = off;
3800 	led.on = on;
3801 
3802 	(void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3803 }
3804 
3805 /*
3806  * necessary setting to NIC before authentication
3807  */
3808 static int
3809 iwp_hw_set_before_auth(iwp_sc_t *sc)
3810 {
3811 	ieee80211com_t *ic = &sc->sc_ic;
3812 	ieee80211_node_t *in = ic->ic_bss;
3813 	int err = IWP_FAIL;
3814 
3815 	/*
3816 	 * update adapter's configuration according
3817 	 * the info of target AP
3818 	 */
3819 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3820 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3821 
3822 		sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
3823 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
3824 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
3825 
3826 		if (IEEE80211_MODE_11B == ic->ic_curmode) {
3827 			sc->sc_config.cck_basic_rates  = 0x03;
3828 			sc->sc_config.ofdm_basic_rates = 0;
3829 		} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3830 		    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3831 			sc->sc_config.cck_basic_rates  = 0;
3832 			sc->sc_config.ofdm_basic_rates = 0x15;
3833 		} else { /* assume 802.11b/g */
3834 			sc->sc_config.cck_basic_rates  = 0x0f;
3835 			sc->sc_config.ofdm_basic_rates = 0xff;
3836 		}
3837 
3838 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3839 	    RXON_FLG_SHORT_SLOT_MSK);
3840 
3841 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
3842 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3843 	} else {
3844 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3845 	}
3846 
3847 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
3848 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3849 	} else {
3850 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3851 	}
3852 
3853 	IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
3854 	    "config chan %d flags %x "
3855 	    "filter_flags %x  cck %x ofdm %x"
3856 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3857 	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3858 	    LE_32(sc->sc_config.filter_flags),
3859 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3860 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3861 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3862 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3863 
3864 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
3865 	    sizeof (iwp_rxon_cmd_t), 1);
3866 	if (err != IWP_SUCCESS) {
3867 		cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
3868 		    "failed to config chan%d\n", sc->sc_config.chan);
3869 		return (err);
3870 	}
3871 
3872 	/*
3873 	 * add default AP node
3874 	 */
3875 	err = iwp_add_ap_sta(sc);
3876 	if (err != IWP_SUCCESS) {
3877 		return (err);
3878 	}
3879 
3880 
3881 	return (err);
3882 }
3883 
3884 /*
3885  * Send a scan request(assembly scan cmd) to the firmware.
3886  */
3887 static int
3888 iwp_scan(iwp_sc_t *sc)
3889 {
3890 	ieee80211com_t *ic = &sc->sc_ic;
3891 	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
3892 	iwp_tx_desc_t *desc;
3893 	iwp_tx_data_t *data;
3894 	iwp_cmd_t *cmd;
3895 	iwp_scan_hdr_t *hdr;
3896 	iwp_scan_chan_t chan;
3897 	struct ieee80211_frame *wh;
3898 	ieee80211_node_t *in = ic->ic_bss;
3899 	uint8_t essid[IEEE80211_NWID_LEN+1];
3900 	struct ieee80211_rateset *rs;
3901 	enum ieee80211_phymode mode;
3902 	uint8_t *frm;
3903 	int i, pktlen, nrates;
3904 
3905 	data = &ring->data[ring->cur];
3906 	desc = data->desc;
3907 	cmd = (iwp_cmd_t *)data->dma_data.mem_va;
3908 
3909 	cmd->hdr.type = REPLY_SCAN_CMD;
3910 	cmd->hdr.flags = 0;
3911 	cmd->hdr.qid = ring->qid;
3912 	cmd->hdr.idx = ring->cur | 0x40;
3913 
3914 	hdr = (iwp_scan_hdr_t *)cmd->data;
3915 	(void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
3916 	hdr->nchan = 1;
3917 	hdr->quiet_time = LE_16(50);
3918 	hdr->quiet_plcp_th = LE_16(1);
3919 
3920 	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
3921 	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3922 	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
3923 	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3924 	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3925 
3926 	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3927 	hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
3928 	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
3929 	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
3930 	hdr->tx_cmd.rate.r.rate_n_flags |=
3931 	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
3932 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3933 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3934 
3935 	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3936 	    RXON_FILTER_BCON_AWARE_MSK);
3937 
3938 	if (ic->ic_des_esslen) {
3939 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3940 		essid[ic->ic_des_esslen] = '\0';
3941 		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3942 		    "directed scan %s\n", essid));
3943 
3944 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3945 		    ic->ic_des_esslen);
3946 	} else {
3947 		bzero(hdr->direct_scan[0].ssid,
3948 		    sizeof (hdr->direct_scan[0].ssid));
3949 	}
3950 
3951 	/*
3952 	 * a probe request frame is required after the REPLY_SCAN_CMD
3953 	 */
3954 	wh = (struct ieee80211_frame *)(hdr + 1);
3955 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3956 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3957 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3958 	(void) memset(wh->i_addr1, 0xff, 6);
3959 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3960 	(void) memset(wh->i_addr3, 0xff, 6);
3961 	*(uint16_t *)&wh->i_dur[0] = 0;
3962 	*(uint16_t *)&wh->i_seq[0] = 0;
3963 
3964 	frm = (uint8_t *)(wh + 1);
3965 
3966 	/*
3967 	 * essid IE
3968 	 */
3969 	if (in->in_esslen) {
3970 		bcopy(in->in_essid, essid, in->in_esslen);
3971 		essid[in->in_esslen] = '\0';
3972 		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3973 		    "probe with ESSID %s\n",
3974 		    essid));
3975 	}
3976 	*frm++ = IEEE80211_ELEMID_SSID;
3977 	*frm++ = in->in_esslen;
3978 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3979 	frm += in->in_esslen;
3980 
3981 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3982 	rs = &ic->ic_sup_rates[mode];
3983 
3984 	/*
3985 	 * supported rates IE
3986 	 */
3987 	*frm++ = IEEE80211_ELEMID_RATES;
3988 	nrates = rs->ir_nrates;
3989 	if (nrates > IEEE80211_RATE_SIZE) {
3990 		nrates = IEEE80211_RATE_SIZE;
3991 	}
3992 
3993 	*frm++ = (uint8_t)nrates;
3994 	(void) memcpy(frm, rs->ir_rates, nrates);
3995 	frm += nrates;
3996 
3997 	/*
3998 	 * supported xrates IE
3999 	 */
4000 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4001 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4002 		*frm++ = IEEE80211_ELEMID_XRATES;
4003 		*frm++ = (uint8_t)nrates;
4004 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
4005 		frm += nrates;
4006 	}
4007 
4008 	/*
4009 	 * optionnal IE (usually for wpa)
4010 	 */
4011 	if (ic->ic_opt_ie != NULL) {
4012 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
4013 		frm += ic->ic_opt_ie_len;
4014 	}
4015 
4016 	/* setup length of probe request */
4017 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4018 	hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
4019 	    LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));
4020 
4021 	/*
4022 	 * the attribute of the scan channels are required after the probe
4023 	 * request frame.
4024 	 */
4025 	for (i = 1; i <= hdr->nchan; i++) {
4026 		if (ic->ic_des_esslen) {
4027 			chan.type = LE_32(3);
4028 		} else {
4029 			chan.type = LE_32(1);
4030 		}
4031 
4032 		chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4033 		chan.tpc.tx_gain = 0x28;
4034 		chan.tpc.dsp_atten = 110;
4035 		chan.active_dwell = LE_16(50);
4036 		chan.passive_dwell = LE_16(120);
4037 
4038 		bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
4039 		frm += sizeof (iwp_scan_chan_t);
4040 	}
4041 
4042 	pktlen = _PTRDIFF(frm, cmd);
4043 
4044 	(void) memset(desc, 0, sizeof (*desc));
4045 	desc->val0 = 1 << 24;
4046 	desc->pa[0].tb1_addr =
4047 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4048 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4049 
4050 	/*
4051 	 * maybe for cmd, filling the byte cnt table is not necessary.
4052 	 * anyway, we fill it here.
4053 	 */
4054 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4055 	    .tfd_offset[ring->cur].val = 8;
4056 	if (ring->cur < IWP_MAX_WIN_SIZE) {
4057 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4058 		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
4059 	}
4060 
4061 	/*
4062 	 * kick cmd ring
4063 	 */
4064 	ring->cur = (ring->cur + 1) % ring->count;
4065 	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4066 
4067 	return (IWP_SUCCESS);
4068 }
4069 
4070 /*
4071  * configure NIC by using ucode commands after loading ucode.
4072  */
4073 static int
4074 iwp_config(iwp_sc_t *sc)
4075 {
4076 	ieee80211com_t *ic = &sc->sc_ic;
4077 	iwp_powertable_cmd_t powertable;
4078 	iwp_bt_cmd_t bt;
4079 	iwp_add_sta_t node;
4080 	iwp_rem_sta_t	rm_sta;
4081 	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
4082 	int err = IWP_FAIL;
4083 
4084 	/*
4085 	 * set power mode. Disable power management at present, do it later
4086 	 */
4087 	(void) memset(&powertable, 0, sizeof (powertable));
4088 	powertable.flags = LE_16(0x8);
4089 	err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
4090 	    sizeof (powertable), 0);
4091 	if (err != IWP_SUCCESS) {
4092 		cmn_err(CE_WARN, "iwp_config(): "
4093 		    "failed to set power mode\n");
4094 		return (err);
4095 	}
4096 
4097 	/*
4098 	 * configure bt coexistence
4099 	 */
4100 	(void) memset(&bt, 0, sizeof (bt));
4101 	bt.flags = 3;
4102 	bt.lead_time = 0xaa;
4103 	bt.max_kill = 1;
4104 	err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
4105 	    sizeof (bt), 0);
4106 	if (err != IWP_SUCCESS) {
4107 		cmn_err(CE_WARN, "iwp_config(): "
4108 		    "failed to configurate bt coexistence\n");
4109 		return (err);
4110 	}
4111 
4112 	/*
4113 	 * configure rxon
4114 	 */
4115 	(void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
4116 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
4117 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
4118 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4119 	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
4120 	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4121 	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));
4122 
4123 	switch (ic->ic_opmode) {
4124 	case IEEE80211_M_STA:
4125 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
4126 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4127 		    RXON_FILTER_DIS_DECRYPT_MSK |
4128 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4129 		break;
4130 	case IEEE80211_M_IBSS:
4131 	case IEEE80211_M_AHDEMO:
4132 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
4133 
4134 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4135 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4136 		    RXON_FILTER_DIS_DECRYPT_MSK |
4137 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4138 		break;
4139 	case IEEE80211_M_HOSTAP:
4140 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
4141 		break;
4142 	case IEEE80211_M_MONITOR:
4143 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
4144 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4145 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
4146 		break;
4147 	}
4148 
4149 	/*
4150 	 * Support all CCK rates.
4151 	 */
4152 	sc->sc_config.cck_basic_rates  = 0x0f;
4153 
4154 	/*
4155 	 * Support all OFDM rates.
4156 	 */
4157 	sc->sc_config.ofdm_basic_rates = 0xff;
4158 
4159 	sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
4160 	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
4161 	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
4162 	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
4163 
4164 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
4165 	    sizeof (iwp_rxon_cmd_t), 0);
4166 	if (err != IWP_SUCCESS) {
4167 		cmn_err(CE_WARN, "iwp_config(): "
4168 		    "failed to set configure command\n");
4169 		return (err);
4170 	}
4171 
4172 	/*
4173 	 * remove all nodes in NIC
4174 	 */
4175 	(void) memset(&rm_sta, 0, sizeof (rm_sta));
4176 	rm_sta.num_sta = 1;
4177 	(void) memcpy(rm_sta.addr, bcast, 6);
4178 
4179 	err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
4180 	if (err != IWP_SUCCESS) {
4181 		cmn_err(CE_WARN, "iwp_config(): "
4182 		    "failed to remove broadcast node in hardware.\n");
4183 		return (err);
4184 	}
4185 
4186 	/*
4187 	 * add broadcast node so that we can send broadcast frame
4188 	 */
4189 	(void) memset(&node, 0, sizeof (node));
4190 	(void) memset(node.sta.addr, 0xff, 6);
4191 	node.mode = 0;
4192 	node.sta.sta_id = IWP_BROADCAST_ID;
4193 	node.station_flags = 0;
4194 
4195 	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
4196 	if (err != IWP_SUCCESS) {
4197 		cmn_err(CE_WARN, "iwp_config(): "
4198 		    "failed to add broadcast node\n");
4199 		return (err);
4200 	}
4201 
4202 	return (err);
4203 }
4204 
4205 /*
4206  * quiesce(9E) entry point.
4207  * This function is called when the system is single-threaded at high
4208  * PIL with preemption disabled. Therefore, this function must not be
4209  * blocked.
4210  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4211  * DDI_FAILURE indicates an error condition and should almost never happen.
4212  */
4213 static int
4214 iwp_quiesce(dev_info_t *dip)
4215 {
4216 	iwp_sc_t *sc;
4217 
4218 	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4219 	if (NULL == sc) {
4220 		return (DDI_FAILURE);
4221 	}
4222 
4223 #ifdef DEBUG
4224 	/* by pass any messages, if it's quiesce */
4225 	iwp_dbg_flags = 0;
4226 #endif
4227 
4228 	/*
4229 	 * No more blocking is allowed while we are in the
4230 	 * quiesce(9E) entry point.
4231 	 */
4232 	atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4233 
4234 	/*
4235 	 * Disable and mask all interrupts.
4236 	 */
4237 	iwp_stop(sc);
4238 
4239 	return (DDI_SUCCESS);
4240 }
4241 
4242 static void
4243 iwp_stop_master(iwp_sc_t *sc)
4244 {
4245 	uint32_t tmp;
4246 	int n;
4247 
4248 	tmp = IWP_READ(sc, CSR_RESET);
4249 	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4250 
4251 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
4252 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4253 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4254 		return;
4255 	}
4256 
4257 	for (n = 0; n < 2000; n++) {
4258 		if (IWP_READ(sc, CSR_RESET) &
4259 		    CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4260 			break;
4261 		}
4262 		DELAY(1000);
4263 	}
4264 
4265 #ifdef	DEBUG
4266 	if (2000 == n) {
4267 		IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
4268 		    "timeout waiting for master stop\n"));
4269 	}
4270 #endif
4271 }
4272 
/*
 * Select VMAIN as the adapter power source via the APMG
 * power-supply control register, then wait for it to settle.
 * Always returns IWP_SUCCESS.
 */
static int
iwp_power_up(iwp_sc_t *sc)
{
	uint32_t tmp;

	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	/* allow the power supply to stabilize */
	DELAY(5000);
	return (IWP_SUCCESS);
}
4288 
4289 /*
4290  * hardware initialization
4291  */
4292 static int
4293 iwp_preinit(iwp_sc_t *sc)
4294 {
4295 	int		n;
4296 	uint8_t		vlink;
4297 	uint16_t	radio_cfg;
4298 	uint32_t	tmp;
4299 
4300 	/*
4301 	 * clear any pending interrupts
4302 	 */
4303 	IWP_WRITE(sc, CSR_INT, 0xffffffff);
4304 
4305 	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
4306 	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
4307 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4308 
4309 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
4310 	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4311 
4312 	/*
4313 	 * wait for clock ready
4314 	 */
4315 	for (n = 0; n < 1000; n++) {
4316 		if (IWP_READ(sc, CSR_GP_CNTRL) &
4317 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
4318 			break;
4319 		}
4320 		DELAY(10);
4321 	}
4322 
4323 	if (1000 == n) {
4324 		return (ETIMEDOUT);
4325 	}
4326 
4327 	iwp_mac_access_enter(sc);
4328 
4329 	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4330 
4331 	DELAY(20);
4332 	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
4333 	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4334 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4335 	iwp_mac_access_exit(sc);
4336 
4337 	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
4338 	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
4339 		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4340 		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4341 		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
4342 		    SP_RADIO_STEP_MSK(radio_cfg) |
4343 		    SP_RADIO_DASH_MSK(radio_cfg));
4344 	} else {
4345 		cmn_err(CE_WARN, "iwp_preinit(): "
4346 		    "radio configuration information in eeprom is wrong\n");
4347 		return (IWP_FAIL);
4348 	}
4349 
4350 
4351 	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);
4352 
4353 	(void) iwp_power_up(sc);
4354 
4355 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4356 		tmp = ddi_get32(sc->sc_cfg_handle,
4357 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
4358 		ddi_put32(sc->sc_cfg_handle,
4359 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
4360 		    tmp & ~(1 << 11));
4361 	}
4362 
4363 	vlink = ddi_get8(sc->sc_cfg_handle,
4364 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
4365 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4366 	    vlink & ~2);
4367 
4368 	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4369 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4370 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
4371 	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);
4372 
4373 	/*
4374 	 * make sure power supply on each part of the hardware
4375 	 */
4376 	iwp_mac_access_enter(sc);
4377 	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4378 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4379 	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4380 	DELAY(5);
4381 
4382 	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4383 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4384 	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4385 	iwp_mac_access_exit(sc);
4386 
4387 	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
4388 		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
4389 		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
4390 	}
4391 
4392 	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {
4393 
4394 		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
4395 		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
4396 	}
4397 
4398 	return (IWP_SUCCESS);
4399 }
4400 
4401 /*
4402  * set up semphore flag to own EEPROM
4403  */
4404 static int
4405 iwp_eep_sem_down(iwp_sc_t *sc)
4406 {
4407 	int count1, count2;
4408 	uint32_t tmp;
4409 
4410 	for (count1 = 0; count1 < 1000; count1++) {
4411 		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4412 		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4413 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4414 
4415 		for (count2 = 0; count2 < 2; count2++) {
4416 			if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4417 			    CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4418 				return (IWP_SUCCESS);
4419 			}
4420 			DELAY(10000);
4421 		}
4422 	}
4423 	return (IWP_FAIL);
4424 }
4425 
4426 /*
4427  * reset semphore flag to release EEPROM
4428  */
4429 static void
4430 iwp_eep_sem_up(iwp_sc_t *sc)
4431 {
4432 	uint32_t tmp;
4433 
4434 	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4435 	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4436 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4437 }
4438 
4439 /*
4440  * This function read all infomation from eeprom
4441  */
4442 static int
4443 iwp_eep_load(iwp_sc_t *sc)
4444 {
4445 	int i, rr;
4446 	uint32_t rv, tmp, eep_gp;
4447 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4448 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4449 
4450 	/*
4451 	 * read eeprom gp register in CSR
4452 	 */
4453 	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
4454 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4455 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
4456 		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4457 		    "not find eeprom\n"));
4458 		return (IWP_FAIL);
4459 	}
4460 
4461 	rr = iwp_eep_sem_down(sc);
4462 	if (rr != 0) {
4463 		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4464 		    "driver failed to own EEPROM\n"));
4465 		return (IWP_FAIL);
4466 	}
4467 
4468 	for (addr = 0; addr < eep_sz; addr += 2) {
4469 		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4470 		tmp = IWP_READ(sc, CSR_EEPROM_REG);
4471 		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4472 
4473 		for (i = 0; i < 10; i++) {
4474 			rv = IWP_READ(sc, CSR_EEPROM_REG);
4475 			if (rv & 1) {
4476 				break;
4477 			}
4478 			DELAY(10);
4479 		}
4480 
4481 		if (!(rv & 1)) {
4482 			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
4483 			    "time out when read eeprome\n"));
4484 			iwp_eep_sem_up(sc);
4485 			return (IWP_FAIL);
4486 		}
4487 
4488 		eep_p[addr/2] = LE_16(rv >> 16);
4489 	}
4490 
4491 	iwp_eep_sem_up(sc);
4492 	return (IWP_SUCCESS);
4493 }
4494 
4495 /*
4496  * initialize mac address in ieee80211com_t struct
4497  */
4498 static void
4499 iwp_get_mac_from_eep(iwp_sc_t *sc)
4500 {
4501 	ieee80211com_t *ic = &sc->sc_ic;
4502 
4503 	IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4504 
4505 	IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4506 	    "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4507 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4508 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4509 }
4510 
4511 /*
4512  * main initialization function
4513  */
4514 static int
4515 iwp_init(iwp_sc_t *sc)
4516 {
4517 	int err = IWP_FAIL;
4518 	clock_t clk;
4519 
4520 	/*
4521 	 * release buffer for calibration
4522 	 */
4523 	iwp_release_calib_buffer(sc);
4524 
4525 	mutex_enter(&sc->sc_glock);
4526 	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);
4527 
4528 	err = iwp_init_common(sc);
4529 	if (err != IWP_SUCCESS) {
4530 		mutex_exit(&sc->sc_glock);
4531 		return (IWP_FAIL);
4532 	}
4533 
4534 	/*
4535 	 * backup ucode data part for future use.
4536 	 */
4537 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4538 	    sc->sc_dma_fw_data.mem_va,
4539 	    sc->sc_dma_fw_data.alength);
4540 
4541 	/* load firmware init segment into NIC */
4542 	err = iwp_load_init_firmware(sc);
4543 	if (err != IWP_SUCCESS) {
4544 		cmn_err(CE_WARN, "iwp_init(): "
4545 		    "failed to setup init firmware\n");
4546 		mutex_exit(&sc->sc_glock);
4547 		return (IWP_FAIL);
4548 	}
4549 
4550 	/*
4551 	 * now press "execute" start running
4552 	 */
4553 	IWP_WRITE(sc, CSR_RESET, 0);
4554 
4555 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
4556 	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
4557 		if (cv_timedwait(&sc->sc_ucode_cv,
4558 		    &sc->sc_glock, clk) < 0) {
4559 			break;
4560 		}
4561 	}
4562 
4563 	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
4564 		cmn_err(CE_WARN, "iwp_init(): "
4565 		    "failed to process init alive.\n");
4566 		mutex_exit(&sc->sc_glock);
4567 		return (IWP_FAIL);
4568 	}
4569 
4570 	mutex_exit(&sc->sc_glock);
4571 
4572 	/*
4573 	 * stop chipset for initializing chipset again
4574 	 */
4575 	iwp_stop(sc);
4576 
4577 	mutex_enter(&sc->sc_glock);
4578 	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);
4579 
4580 	err = iwp_init_common(sc);
4581 	if (err != IWP_SUCCESS) {
4582 		mutex_exit(&sc->sc_glock);
4583 		return (IWP_FAIL);
4584 	}
4585 
4586 	/*
4587 	 * load firmware run segment into NIC
4588 	 */
4589 	err = iwp_load_run_firmware(sc);
4590 	if (err != IWP_SUCCESS) {
4591 		cmn_err(CE_WARN, "iwp_init(): "
4592 		    "failed to setup run firmware\n");
4593 		mutex_exit(&sc->sc_glock);
4594 		return (IWP_FAIL);
4595 	}
4596 
4597 	/*
4598 	 * now press "execute" start running
4599 	 */
4600 	IWP_WRITE(sc, CSR_RESET, 0);
4601 
4602 	clk = ddi_get_lbolt() + drv_usectohz(1000000);
4603 	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
4604 		if (cv_timedwait(&sc->sc_ucode_cv,
4605 		    &sc->sc_glock, clk) < 0) {
4606 			break;
4607 		}
4608 	}
4609 
4610 	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
4611 		cmn_err(CE_WARN, "iwp_init(): "
4612 		    "failed to process runtime alive.\n");
4613 		mutex_exit(&sc->sc_glock);
4614 		return (IWP_FAIL);
4615 	}
4616 
4617 	mutex_exit(&sc->sc_glock);
4618 
4619 	DELAY(1000);
4620 
4621 	mutex_enter(&sc->sc_glock);
4622 	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);
4623 
4624 	/*
4625 	 * at this point, the firmware is loaded OK, then config the hardware
4626 	 * with the ucode API, including rxon, txpower, etc.
4627 	 */
4628 	err = iwp_config(sc);
4629 	if (err) {
4630 		cmn_err(CE_WARN, "iwp_init(): "
4631 		    "failed to configure device\n");
4632 		mutex_exit(&sc->sc_glock);
4633 		return (IWP_FAIL);
4634 	}
4635 
4636 	/*
4637 	 * at this point, hardware may receive beacons :)
4638 	 */
4639 	mutex_exit(&sc->sc_glock);
4640 	return (IWP_SUCCESS);
4641 }
4642 
4643 /*
4644  * stop or disable NIC
4645  */
/*
 * Stop the NIC: reset the device, mask interrupts, drain all Tx/Rx
 * rings, stop DMA clocks and the bus master, then assert a software
 * reset.  Safe to call from the quiesce path, in which case sc_glock
 * is deliberately not taken (single-threaded context).
 */
static void
iwp_stop(iwp_sc_t *sc)
{
	uint32_t tmp;
	int i;

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_enter(&sc->sc_glock);
	}

	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);
	/* acknowledge (clear) any latched interrupt causes */
	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/*
	 * reset all Tx rings
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
	}

	/*
	 * reset Rx ring
	 */
	iwp_reset_rx_ring(sc);

	/* gate off the DMA clock request */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwp_mac_access_exit(sc);

	DELAY(5);

	iwp_stop_master(sc);

	/* cancel any pending tx watchdog countdown */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_exit(&sc->sc_glock);
	}
}
4697 
4698 /*
4699  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4700  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4701  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4702  * INRIA Sophia - Projet Planete
4703  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4704  */
/* tx retries were under 10% of transmissions: the rate is working */
#define	is_success(amrr)	\
	((amrr)->retrycnt < (amrr)->txcnt / 10)
/* tx retries exceeded a third of transmissions: the rate is failing */
#define	is_failure(amrr)	\
	((amrr)->retrycnt > (amrr)->txcnt / 3)
/* enough samples accumulated for a statistically meaningful decision */
#define	is_enough(amrr)		\
	((amrr)->txcnt > 200)
/* minimum sample count before declaring failure */
#define	not_very_few(amrr)	\
	((amrr)->txcnt > 40)
/* already at the lowest rate index */
#define	is_min_rate(in)		\
	(0 == (in)->in_txrate)
/* already at the highest rate the node supports */
#define	is_max_rate(in)		\
	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
#define	increase_rate(in)	\
	((in)->in_txrate++)
#define	decrease_rate(in)	\
	((in)->in_txrate--)
/* restart the tx/retry counters for the next measurement window */
#define	reset_cnt(amrr)		\
	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }

/* bounds for the adaptive success threshold used in recovery mode */
#define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	 1
#define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15
4726 
4727 static void
4728 iwp_amrr_init(iwp_amrr_t *amrr)
4729 {
4730 	amrr->success = 0;
4731 	amrr->recovery = 0;
4732 	amrr->txcnt = amrr->retrycnt = 0;
4733 	amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4734 }
4735 
/*
 * Periodic rate-control tick: run the AMRR algorithm over the
 * relevant node(s) and record the time of this pass in sc_clk.
 */
static void
iwp_amrr_timeout(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;

	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
	    "enter\n"));

	/* in STA mode only the BSS node needs rate adaptation */
	if (IEEE80211_M_STA == ic->ic_opmode) {
		iwp_amrr_ratectl(NULL, ic->ic_bss);
	} else {
		ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
	}

	sc->sc_clk = ddi_get_lbolt();
}
4752 
/* ARGSUSED */
/*
 * Per-node AMRR rate adjustment (see the paper cited above).
 * Raise the rate after sustained success, drop it after sustained
 * failure, and adapt the success threshold while recovering.
 *
 * NOTE(review): the node pointer is cast directly to iwp_amrr_t,
 * which assumes the AMRR state is placed at the start of the
 * driver's node structure -- verify against the node allocator.
 */
static void
iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
{
	iwp_amrr_t *amrr = (iwp_amrr_t *)in;
	int need_change = 0;

	if (is_success(amrr) && is_enough(amrr)) {
		amrr->success++;
		/* enough consecutive good windows: try the next rate up */
		if (amrr->success >= amrr->success_threshold &&
		    !is_max_rate(in)) {
			amrr->recovery = 1;
			amrr->success = 0;
			increase_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR increasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		} else {
			amrr->recovery = 0;
		}
	} else if (not_very_few(amrr) && is_failure(amrr)) {
		amrr->success = 0;
		if (!is_min_rate(in)) {
			if (amrr->recovery) {
				/*
				 * the last increase failed quickly: demand
				 * a longer success streak next time (capped)
				 */
				amrr->success_threshold++;
				if (amrr->success_threshold >
				    IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
					amrr->success_threshold =
					    IWP_AMRR_MAX_SUCCESS_THRESHOLD;
				}
			} else {
				amrr->success_threshold =
				    IWP_AMRR_MIN_SUCCESS_THRESHOLD;
			}
			decrease_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR decreasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		}
		amrr->recovery = 0;	/* paper is incorrect */
	}

	/* start a fresh measurement window after any decision */
	if (is_enough(amrr) || need_change) {
		reset_cnt(amrr);
	}
}
4805 
4806 /*
4807  * translate indirect address in eeprom to direct address
4808  * in eeprom and return address of entry whos indirect address
4809  * is indi_addr
4810  */
/*
 * Translate an (possibly indirect) EEPROM address into a pointer
 * into the cached EEPROM map sc_eep_map.
 *
 * Direct addresses index the map as-is.  Indirect addresses select a
 * per-section link word by INDIRECT_TYPE_MSK; the link value appears
 * to be a 16-bit word offset, hence the << 1 to get a byte offset --
 * TODO confirm against the EEPROM layout spec.
 */
static uint8_t *
iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
{
	uint32_t	di_addr;
	uint16_t	temp;

	/* direct address: no translation needed */
	if (!(indi_addr & INDIRECT_ADDRESS)) {
		di_addr = indi_addr;
		return (&sc->sc_eep_map[di_addr]);
	}

	switch (indi_addr & INDIRECT_TYPE_MSK) {
	case INDIRECT_GENERAL:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
		break;
	case	INDIRECT_HOST:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
		break;
	case	INDIRECT_REGULATORY:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
		break;
	case	INDIRECT_CALIBRATION:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
		break;
	case	INDIRECT_PROCESS_ADJST:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
		break;
	case	INDIRECT_OTHERS:
		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
		break;
	default:
		/* unknown section: fall back to a zero link offset */
		temp = 0;
		cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
		    "incorrect indirect eeprom address.\n");
		break;
	}

	di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);

	return (&sc->sc_eep_map[di_addr]);
}
4852 
4853 /*
4854  * loade a section of ucode into NIC
4855  */
/*
 * DMA one ucode segment from host memory into NIC SRAM using the
 * flip-handler (FH) service channel.
 *
 * addr_s: source DRAM (host) address of the segment
 * addr_d: destination SRAM address in the NIC
 * len:    segment length in bytes
 *
 * Always returns IWP_SUCCESS; completion is signalled by the
 * hardware via an FH interrupt, not checked here.
 */
static int
iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
{

	iwp_mac_access_enter(sc);

	/* pause the service channel before reprogramming it */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* destination SRAM address */
	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);

	/* source host address (low bits only, per the LSB mask) */
	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);

	/* single transfer buffer, mark it valid */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* kick off the transfer */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwp_mac_access_exit(sc);

	return (IWP_SUCCESS);
}
4886 
4887 /*
4888  * necessary setting during alive notification
4889  */
/*
 * Common post-"alive" setup: program the scheduler (SCD) so Tx
 * queues work, then send the WiMAX-coexistence and crystal-frequency
 * calibration commands to the ucode.
 *
 * Returns IWP_SUCCESS, or the error from a failed iwp_cmd().
 */
static int
iwp_alive_common(iwp_sc_t *sc)
{
	uint32_t	base;
	uint32_t	i;
	iwp_wimax_coex_cmd_t	w_cmd;
	iwp_calibration_crystal_cmd_t	c_cmd;
	uint32_t	rv = IWP_FAIL;

	/*
	 * initialize SCD related registers to make TX work.
	 */
	iwp_mac_access_enter(sc);

	/*
	 * read sram address of data base.
	 */
	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);

	/* zero the SCD context area ... */
	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... the tx status bitmap area ... */
	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... and the per-queue translate table */
	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
		iwp_mem_write(sc, base + i, 0);
	}

	/* byte-count tables live in the shared DMA area (256B aligned) */
	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
	    sc->sc_dma_sh.cookie.dmac_address >> 10);

	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));

	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);

	/* reset read/write pointers and window/frame limits per queue */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
		    sizeof (uint32_t),
		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((SCD_FRAME_LIMIT <<
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);

	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);

	/*
	 * queue 0-7 map to FIFO 0-7 and
	 * all queues work under FIFO mode(none-scheduler_ack)
	 */
	for (i = 0; i < 4; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	/* the command queue gets its dedicated FIFO */
	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWP_SCD_QUEUE_STTS_REG_MSK);

	for (i = 5; i < 7; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_mac_access_exit(sc);

	/* all-zero coexistence table: WiMAX coexistence disabled */
	(void) memset(&w_cmd, 0, sizeof (w_cmd));

	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send wimax coexist command.\n");
		return (rv);
	}

	(void) memset(&c_cmd, 0, sizeof (c_cmd));

	/* crystal capacitor values come from the EEPROM calibration data */
	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);

	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send crystal frq calibration command.\n");
		return (rv);
	}

	/*
	 * make sure crystal frequency calibration ready
	 * before next operations.
	 */
	DELAY(1000);

	return (IWP_SUCCESS);
}
5013 
5014 /*
5015  * save results of calibration from ucode
5016  */
5017 static void
5018 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5019 {
5020 	struct iwp_calib_results *res_p = &sc->sc_calib_results;
5021 	struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5022 	int len = LE_32(desc->len);
5023 
5024 	/*
5025 	 * ensure the size of buffer is not too big
5026 	 */
5027 	len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5028 
5029 	switch (calib_hdr->op_code) {
5030 	case PHY_CALIBRATE_LO_CMD:
5031 		if (NULL == res_p->lo_res) {
5032 			res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5033 		}
5034 
5035 		if (NULL == res_p->lo_res) {
5036 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5037 			    "failed to allocate memory.\n");
5038 			return;
5039 		}
5040 
5041 		res_p->lo_res_len = len;
5042 		(void) memcpy(res_p->lo_res, calib_hdr, len);
5043 		break;
5044 	case PHY_CALIBRATE_TX_IQ_CMD:
5045 		if (NULL == res_p->tx_iq_res) {
5046 			res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5047 		}
5048 
5049 		if (NULL == res_p->tx_iq_res) {
5050 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5051 			    "failed to allocate memory.\n");
5052 			return;
5053 		}
5054 
5055 		res_p->tx_iq_res_len = len;
5056 		(void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5057 		break;
5058 	case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5059 		if (NULL == res_p->tx_iq_perd_res) {
5060 			res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5061 		}
5062 
5063 		if (NULL == res_p->tx_iq_perd_res) {
5064 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5065 			    "failed to allocate memory.\n");
5066 		}
5067 
5068 		res_p->tx_iq_perd_res_len = len;
5069 		(void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5070 		break;
5071 	case PHY_CALIBRATE_BASE_BAND_CMD:
5072 		if (NULL == res_p->base_band_res) {
5073 			res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5074 		}
5075 
5076 		if (NULL == res_p->base_band_res) {
5077 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5078 			    "failed to allocate memory.\n");
5079 		}
5080 
5081 		res_p->base_band_res_len = len;
5082 		(void) memcpy(res_p->base_band_res, calib_hdr, len);
5083 		break;
5084 	default:
5085 		cmn_err(CE_WARN, "iwp_save_calib_result(): "
5086 		    "incorrect calibration type(%d).\n", calib_hdr->op_code);
5087 		break;
5088 	}
5089 
5090 }
5091 
5092 static void
5093 iwp_release_calib_buffer(iwp_sc_t *sc)
5094 {
5095 	if (sc->sc_calib_results.lo_res != NULL) {
5096 		kmem_free(sc->sc_calib_results.lo_res,
5097 		    sc->sc_calib_results.lo_res_len);
5098 		sc->sc_calib_results.lo_res = NULL;
5099 	}
5100 
5101 	if (sc->sc_calib_results.tx_iq_res != NULL) {
5102 		kmem_free(sc->sc_calib_results.tx_iq_res,
5103 		    sc->sc_calib_results.tx_iq_res_len);
5104 		sc->sc_calib_results.tx_iq_res = NULL;
5105 	}
5106 
5107 	if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5108 		kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5109 		    sc->sc_calib_results.tx_iq_perd_res_len);
5110 		sc->sc_calib_results.tx_iq_perd_res = NULL;
5111 	}
5112 
5113 	if (sc->sc_calib_results.base_band_res != NULL) {
5114 		kmem_free(sc->sc_calib_results.base_band_res,
5115 		    sc->sc_calib_results.base_band_res_len);
5116 		sc->sc_calib_results.base_band_res = NULL;
5117 	}
5118 
5119 }
5120 
5121 /*
5122  * common section of intialization
5123  */
/*
 * Hardware bring-up shared by the init-image and runtime-image
 * loads: checks the RF-kill switch, programs the Rx ring and all Tx
 * ring DMA registers, clears the software RF-kill and command-block
 * bits, and enables interrupts.
 *
 * Returns IWP_SUCCESS, or IWP_FAIL if the radio switch is off.
 * Caller holds sc_glock.
 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t	qid;
	uint32_t tmp;

	(void) iwp_preinit(sc);

	/* hardware RF-kill switch: refuse to start with the radio off */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring
	 */
	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	/* Rx buffer descriptor base (256B aligned) */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* Rx status write-back address in the shared area (16B aligned) */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	/* hand all but the last 8 RBDs to the hardware */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): the SW RF-kill bit is cleared twice more here;
	 * looks redundant with the clear above -- possibly a hardware
	 * settling workaround, verify before removing.
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}
5207 
/*
 * Recover from a hardware error without a full reinitialization:
 * restore the saved runtime configuration, redo the pre-auth setup
 * and the associated-state configuration, then resume transmission.
 *
 * Returns IWP_SUCCESS or the error from a failed setup step.
 */
static int
iwp_fast_recover(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	int err = IWP_FAIL;

	mutex_enter(&sc->sc_glock);

	/* restore runtime configuration */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/* temporarily drop association state for the auth setup */
	sc->sc_config.assoc_id = 0;
	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

	if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "could not setup authentication\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}

	/* restore the full (associated) configuration again */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/* update adapter's configuration */
	err = iwp_run_state_config(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "failed to setup association\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}
	/* set LED on */
	iwp_set_led(sc, 2, 0, 1);

	mutex_exit(&sc->sc_glock);

	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);

	/* start queue */
	IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
	    "resume xmit\n"));
	mac_tx_update(ic->ic_mach);

	return (IWP_SUCCESS);
}
5255 
/*
 * Push the RUN-state (associated) configuration to the ucode:
 * association id, negotiated short-preamble/short-slot flags and
 * the association filter bits, via a REPLY_RXON command.
 *
 * Returns IWP_SUCCESS or the iwp_cmd() error.  Caller holds sc_glock.
 */
static int
iwp_run_state_config(iwp_sc_t *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	ieee80211_node_t *in = ic->ic_bss;
	int err = IWP_FAIL;

	/*
	 * update adapter's configuration
	 */
	sc->sc_config.assoc_id = in->in_associd & 0x3fff;

	/*
	 * short preamble/slot time are
	 * negotiated when associating
	 */
	sc->sc_config.flags &=
	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
	    RXON_FLG_SHORT_SLOT_MSK);

	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
		sc->sc_config.flags |=
		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
	}

	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
		sc->sc_config.flags |=
		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
	}

	sc->sc_config.filter_flags |=
	    LE_32(RXON_FILTER_ASSOC_MSK);

	/* non-STA modes must also pass beacons up */
	if (ic->ic_opmode != IEEE80211_M_STA) {
		sc->sc_config.filter_flags |=
		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
	}

	IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
	    "config chan %d flags %x"
	    " filter_flags %x\n",
	    sc->sc_config.chan, sc->sc_config.flags,
	    sc->sc_config.filter_flags));

	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 1);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_run_state_config(): "
		    "could not update configuration\n");
		return (err);
	}

	return (err);
}
5310 
5311 /*
5312  * This function overwrites default configurations of
5313  * ieee80211com structure in Net80211 module.
5314  */
5315 static void
5316 iwp_overwrite_ic_default(iwp_sc_t *sc)
5317 {
5318 	ieee80211com_t *ic = &sc->sc_ic;
5319 
5320 	sc->sc_newstate = ic->ic_newstate;
5321 	ic->ic_newstate = iwp_newstate;
5322 	ic->ic_node_alloc = iwp_node_alloc;
5323 	ic->ic_node_free = iwp_node_free;
5324 }
5325 
5326 
5327 /*
5328  * This function adds AP station into hardware.
5329  */
5330 static int
5331 iwp_add_ap_sta(iwp_sc_t *sc)
5332 {
5333 	ieee80211com_t *ic = &sc->sc_ic;
5334 	ieee80211_node_t *in = ic->ic_bss;
5335 	iwp_add_sta_t node;
5336 	int err = IWP_FAIL;
5337 
5338 	/*
5339 	 * Add AP node into hardware.
5340 	 */
5341 	(void) memset(&node, 0, sizeof (node));
5342 	IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5343 	node.mode = STA_MODE_ADD_MSK;
5344 	node.sta.sta_id = IWP_AP_ID;
5345 
5346 	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5347 	if (err != IWP_SUCCESS) {
5348 		cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5349 		    "failed to add AP node\n");
5350 		return (err);
5351 	}
5352 
5353 	return (err);
5354 }
5355 
5356 /*
5357  * Check EEPROM version and Calibration version.
5358  */
5359 static int
5360 iwp_eep_ver_chk(iwp_sc_t *sc)
5361 {
5362 	if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5363 	    (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5364 		cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5365 		    "unsupported eeprom detected\n");
5366 		return (IWP_FAIL);
5367 	}
5368 
5369 	return (IWP_SUCCESS);
5370 }
5371 
5372 /*
5373  * Determine parameters for all supported chips.
5374  */
5375 static void
5376 iwp_set_chip_param(iwp_sc_t *sc)
5377 {
5378 	if ((0x008d == sc->sc_dev_id) ||
5379 	    (0x008e == sc->sc_dev_id)) {
5380 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5381 		    PHY_MODE_A | PHY_MODE_N;
5382 
5383 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5384 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5385 
5386 		sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5387 	}
5388 
5389 	if ((0x422c == sc->sc_dev_id) ||
5390 	    (0x4239 == sc->sc_dev_id)) {
5391 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5392 		    PHY_MODE_A | PHY_MODE_N;
5393 
5394 		sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5395 		sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5396 
5397 		sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5398 	}
5399 
5400 	if ((0x422b == sc->sc_dev_id) ||
5401 	    (0x4238 == sc->sc_dev_id)) {
5402 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5403 		    PHY_MODE_A | PHY_MODE_N;
5404 
5405 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5406 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5407 
5408 		sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5409 	}
5410 }
5411