xref: /illumos-gate/usr/src/uts/common/io/iwp/iwp.c (revision ed093b41a93e8563e6e1e5dae0768dda2a7bcc27)
1 /*
2  * Copyright (c) 2018, Joyent, Inc.
3  */
4 
5 /*
6  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
7  * Use is subject to license terms.
8  */
9 
10 /*
11  * Copyright (c) 2009, Intel Corporation
12  * All rights reserved.
13  */
14 
15 /*
16  * Copyright (c) 2006
17  * Copyright (c) 2007
18  *	Damien Bergamini <damien.bergamini@free.fr>
19  *
20  * Permission to use, copy, modify, and distribute this software for any
21  * purpose with or without fee is hereby granted, provided that the above
22  * copyright notice and this permission notice appear in all copies.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
25  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
26  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
27  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
28  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
29  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
30  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
31  */
32 
33 /*
34  * Intel(R) WiFi Link 6000 Driver
35  */
36 
37 #include <sys/types.h>
38 #include <sys/byteorder.h>
39 #include <sys/conf.h>
40 #include <sys/cmn_err.h>
41 #include <sys/stat.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/strsubr.h>
45 #include <sys/ethernet.h>
46 #include <inet/common.h>
47 #include <inet/nd.h>
48 #include <inet/mi.h>
49 #include <sys/note.h>
50 #include <sys/stream.h>
51 #include <sys/strsun.h>
52 #include <sys/modctl.h>
53 #include <sys/devops.h>
54 #include <sys/dlpi.h>
55 #include <sys/mac_provider.h>
56 #include <sys/mac_wifi.h>
57 #include <sys/net80211.h>
58 #include <sys/net80211_proto.h>
59 #include <sys/varargs.h>
60 #include <sys/policy.h>
61 #include <sys/pci.h>
62 
63 #include "iwp_calibration.h"
64 #include "iwp_hw.h"
65 #include "iwp_eeprom.h"
66 #include "iwp_var.h"
67 #include <inet/wifi_ioctl.h>
68 
#ifdef DEBUG
#define	IWP_DEBUG_80211		(1 << 0)
#define	IWP_DEBUG_CMD		(1 << 1)
#define	IWP_DEBUG_DMA		(1 << 2)
#define	IWP_DEBUG_EEPROM	(1 << 3)
#define	IWP_DEBUG_FW		(1 << 4)
#define	IWP_DEBUG_HW		(1 << 5)
#define	IWP_DEBUG_INTR		(1 << 6)
#define	IWP_DEBUG_MRR		(1 << 7)
#define	IWP_DEBUG_PIO		(1 << 8)
#define	IWP_DEBUG_RX		(1 << 9)
#define	IWP_DEBUG_SCAN		(1 << 10)
#define	IWP_DEBUG_TX		(1 << 11)
#define	IWP_DEBUG_RATECTL	(1 << 12)
#define	IWP_DEBUG_RADIO		(1 << 13)
#define	IWP_DEBUG_RESUME	(1 << 14)
#define	IWP_DEBUG_CALIBRATION	(1 << 15)
/*
 * To see the debug messages of a given section, set one or more
 * of the flag bits above in iwp_dbg_flags.
 */
uint32_t iwp_dbg_flags = 0;
#define	IWP_DBG(x) \
	iwp_dbg x
#else
#define	IWP_DBG(x)
#endif

/* per-instance soft state anchor; initialized in _init() */
static void	*iwp_soft_state_p = NULL;

/*
 * ucode will be compiled into driver image
 */
static uint8_t iwp_fw_bin [] = {
#include "fw-iw/iwp.ucode"
};
105 
/*
 * DMA attributes for a shared page (4KB-aligned, single segment)
 */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a keep warm DRAM descriptor (4KB-aligned)
 */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a ring descriptor (256-byte aligned)
 */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a cmd (word-aligned)
 */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a rx buffer (256-byte aligned)
 */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum  number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for text and data part in the firmware
 */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * regs access attributes (little-endian device registers)
 */
static ddi_device_acc_attr_t iwp_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptor (little-endian, as the
 * device expects)
 */
static ddi_device_acc_attr_t iwp_dma_descattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes (raw byte buffers; no byte swapping)
 */
static ddi_device_acc_attr_t iwp_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
267 
268 static int	iwp_ring_init(iwp_sc_t *);
269 static void	iwp_ring_free(iwp_sc_t *);
270 static int	iwp_alloc_shared(iwp_sc_t *);
271 static void	iwp_free_shared(iwp_sc_t *);
272 static int	iwp_alloc_kw(iwp_sc_t *);
273 static void	iwp_free_kw(iwp_sc_t *);
274 static int	iwp_alloc_fw_dma(iwp_sc_t *);
275 static void	iwp_free_fw_dma(iwp_sc_t *);
276 static int	iwp_alloc_rx_ring(iwp_sc_t *);
277 static void	iwp_reset_rx_ring(iwp_sc_t *);
278 static void	iwp_free_rx_ring(iwp_sc_t *);
279 static int	iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
280     int, int);
281 static void	iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
282 static void	iwp_free_tx_ring(iwp_tx_ring_t *);
283 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
284 static void	iwp_node_free(ieee80211_node_t *);
285 static int	iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
286 static void	iwp_mac_access_enter(iwp_sc_t *);
287 static void	iwp_mac_access_exit(iwp_sc_t *);
288 static uint32_t	iwp_reg_read(iwp_sc_t *, uint32_t);
289 static void	iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
290 static int	iwp_load_init_firmware(iwp_sc_t *);
291 static int	iwp_load_run_firmware(iwp_sc_t *);
292 static void	iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
293 static void	iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
294 static uint_t   iwp_intr(caddr_t, caddr_t);
295 static int	iwp_eep_load(iwp_sc_t *);
296 static void	iwp_get_mac_from_eep(iwp_sc_t *);
297 static int	iwp_eep_sem_down(iwp_sc_t *);
298 static void	iwp_eep_sem_up(iwp_sc_t *);
299 static uint_t   iwp_rx_softintr(caddr_t, caddr_t);
300 static uint8_t	iwp_rate_to_plcp(int);
301 static int	iwp_cmd(iwp_sc_t *, int, const void *, int, int);
302 static void	iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
303 static int	iwp_hw_set_before_auth(iwp_sc_t *);
304 static int	iwp_scan(iwp_sc_t *);
305 static int	iwp_config(iwp_sc_t *);
306 static void	iwp_stop_master(iwp_sc_t *);
307 static int	iwp_power_up(iwp_sc_t *);
308 static int	iwp_preinit(iwp_sc_t *);
309 static int	iwp_init(iwp_sc_t *);
310 static void	iwp_stop(iwp_sc_t *);
311 static int	iwp_quiesce(dev_info_t *t);
312 static void	iwp_amrr_init(iwp_amrr_t *);
313 static void	iwp_amrr_timeout(iwp_sc_t *);
314 static void	iwp_amrr_ratectl(void *, ieee80211_node_t *);
315 static void	iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
316 static void	iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
317 static void	iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
318 static void	iwp_release_calib_buffer(iwp_sc_t *);
319 static int	iwp_init_common(iwp_sc_t *);
320 static uint8_t	*iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
321 static int	iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
322 static	int	iwp_alive_common(iwp_sc_t *);
323 static void	iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
324 static int	iwp_attach(dev_info_t *, ddi_attach_cmd_t);
325 static int	iwp_detach(dev_info_t *, ddi_detach_cmd_t);
326 static void	iwp_destroy_locks(iwp_sc_t *);
327 static int	iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
328 static void	iwp_thread(iwp_sc_t *);
329 static int	iwp_run_state_config(iwp_sc_t *);
330 static int	iwp_fast_recover(iwp_sc_t *);
331 static void	iwp_overwrite_ic_default(iwp_sc_t *);
332 static int	iwp_add_ap_sta(iwp_sc_t *);
333 static int	iwp_alloc_dma_mem(iwp_sc_t *, size_t,
334     ddi_dma_attr_t *, ddi_device_acc_attr_t *,
335     uint_t, iwp_dma_t *);
336 static void	iwp_free_dma_mem(iwp_dma_t *);
337 static int	iwp_eep_ver_chk(iwp_sc_t *);
338 static void	iwp_set_chip_param(iwp_sc_t *);
339 
340 /*
341  * GLD specific operations
342  */
343 static int	iwp_m_stat(void *, uint_t, uint64_t *);
344 static int	iwp_m_start(void *);
345 static void	iwp_m_stop(void *);
346 static int	iwp_m_unicst(void *, const uint8_t *);
347 static int	iwp_m_multicst(void *, boolean_t, const uint8_t *);
348 static int	iwp_m_promisc(void *, boolean_t);
349 static mblk_t	*iwp_m_tx(void *, mblk_t *);
350 static void	iwp_m_ioctl(void *, queue_t *, mblk_t *);
351 static int	iwp_m_setprop(void *arg, const char *pr_name,
352     mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
353 static int	iwp_m_getprop(void *arg, const char *pr_name,
354     mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
355 static void	iwp_m_propinfo(void *, const char *, mac_prop_id_t,
356     mac_prop_info_handle_t);
357 
/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 * e.g. 2 == 1Mbps, 22 == 11Mbps, 108 == 54Mbps.
 */
static const struct ieee80211_rateset iwp_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwp_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
366 
/*
 * For mfthread only
 */
extern pri_t minclsyspri;

/* driver name used for soft state, mac registration and minor nodes */
#define	DRV_NAME_SP	"iwp"

/*
 * Module Loading Data & Entry Points
 */
DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
    iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);

static struct modldrv iwp_modldrv = {
	&mod_driverops,
	"Intel(R) PumaPeak driver(N)",
	&iwp_devops
};

static struct modlinkage iwp_modlinkage = {
	MODREV_1,
	&iwp_modldrv,
	NULL
};
391 
392 int
393 _init(void)
394 {
395 	int	status;
396 
397 	status = ddi_soft_state_init(&iwp_soft_state_p,
398 	    sizeof (iwp_sc_t), 1);
399 	if (status != DDI_SUCCESS) {
400 		return (status);
401 	}
402 
403 	mac_init_ops(&iwp_devops, DRV_NAME_SP);
404 	status = mod_install(&iwp_modlinkage);
405 	if (status != DDI_SUCCESS) {
406 		mac_fini_ops(&iwp_devops);
407 		ddi_soft_state_fini(&iwp_soft_state_p);
408 	}
409 
410 	return (status);
411 }
412 
413 int
414 _fini(void)
415 {
416 	int status;
417 
418 	status = mod_remove(&iwp_modlinkage);
419 	if (DDI_SUCCESS == status) {
420 		mac_fini_ops(&iwp_devops);
421 		ddi_soft_state_fini(&iwp_soft_state_p);
422 	}
423 
424 	return (status);
425 }
426 
/*
 * _info(9E): report module information via modinfo.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwp_modlinkage, mip));
}
432 
/*
 * Mac Call Back entries
 *
 * The flags word advertises which of the optional entry points
 * (ioctl/setprop/getprop/propinfo) are provided; the NULL slots are
 * the callbacks this driver does not implement.
 */
mac_callbacks_t	iwp_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iwp_m_stat,
	iwp_m_start,
	iwp_m_stop,
	iwp_m_promisc,
	iwp_m_multicst,
	iwp_m_unicst,
	iwp_m_tx,
	NULL,
	iwp_m_ioctl,
	NULL,		/* not implemented; see MC_* flags above */
	NULL,
	NULL,
	iwp_m_setprop,
	iwp_m_getprop,
	iwp_m_propinfo
};
454 
#ifdef DEBUG
/*
 * Emit a debug message (via cmn_err/CE_NOTE) when any bit in 'flags'
 * is also set in the global iwp_dbg_flags mask.  Normally invoked
 * through the IWP_DBG() macro, which compiles away in non-DEBUG
 * builds.
 */
void
iwp_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list	ap;

	if (flags & iwp_dbg_flags) {
		va_start(ap, fmt);
		vcmn_err(CE_NOTE, fmt, ap);
		va_end(ap);
	}
}
#endif	/* DEBUG */
468 
/*
 * device operations
 */

/*
 * attach(9E) entry point.
 *
 * DDI_ATTACH: maps PCI config and device register spaces, validates
 * the device ID, sets up fixed interrupts, locks and CVs, allocates
 * the shared page, keep-warm page, rings and firmware DMA areas,
 * loads the EEPROM, initializes the net80211 layer and registers
 * with GLDv3.  Failures unwind through the attach_failN labels in
 * reverse order of acquisition.
 *
 * DDI_RESUME: re-initializes the hardware if it was running before
 * suspend and clears the suspend flag.
 */
int
iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwp_sc_t		*sc;
	ieee80211com_t		*ic;
	int			instance, i;
	char			strbuf[32];
	wifi_data_t		wd = { 0 };
	mac_register_t		*macp;
	int			intr_type;
	int			intr_count;
	int			intr_actual;
	int			err = DDI_FAILURE;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		instance = ddi_get_instance(dip);
		sc = ddi_get_soft_state(iwp_soft_state_p,
		    instance);
		ASSERT(sc != NULL);

		if (sc->sc_flags & IWP_F_RUNNING) {
			/*
			 * NOTE(review): iwp_init() result is ignored
			 * here; a failed resume is only visible via
			 * later operations.
			 */
			(void) iwp_init(sc);
		}

		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
		    "resume\n"));
		return (DDI_SUCCESS);
	default:
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate soft state\n");
		goto attach_fail1;
	}

	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
	ASSERT(sc != NULL);

	sc->sc_dip = dip;

	/*
	 * map configure space
	 */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwp_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map config spaces regs\n");
		goto attach_fail2;
	}

	/*
	 * only the PCI device IDs listed below are accepted
	 */
	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
	if ((sc->sc_dev_id != 0x422B) &&
	    (sc->sc_dev_id != 0x422C) &&
	    (sc->sc_dev_id != 0x4238) &&
	    (sc->sc_dev_id != 0x4239) &&
	    (sc->sc_dev_id != 0x008d) &&
	    (sc->sc_dev_id != 0x008e)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "Do not support this device\n");
		goto attach_fail3;
	}

	iwp_set_chip_param(sc);

	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));

	/*
	 * keep from disturbing C3 state of CPU
	 */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
	    PCI_CFG_RETRY_TIMEOUT), 0);

	/*
	 * determine the size of buffer for frame and command to ucode
	 */
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz) {
		sc->sc_clsz = 16;
	}
	sc->sc_clsz = (sc->sc_clsz << 2);

	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map device regs\n");
		goto attach_fail3;
	}

	/*
	 * this is used to differentiate type of hardware
	 */
	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);

	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "fixed type interrupt is not supported\n");
		goto attach_fail4;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "no fixed interrupts\n");
		goto attach_fail4;
	}

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail5;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail6;
	}

	/*
	 * all driver mutexes are initialized at the interrupt
	 * priority so they can be taken from interrupt context
	 */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared buffer for communication between driver and ucode.
	 */
	err = iwp_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail7;
	}

	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));

	/*
	 * Allocate keep warm page.
	 */
	err = iwp_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail8;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwp_preinit(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to initialize hardware\n");
		goto attach_fail9;
	}

	/*
	 * get hardware configurations from eeprom
	 */
	err = iwp_eep_load(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to load eeprom\n");
		goto attach_fail9;
	}

	/*
	 * calibration information from EEPROM
	 */
	sc->sc_eep_calib = (struct iwp_eep_calibration *)
	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);

	err = iwp_eep_ver_chk(sc);
	if (err != IWP_SUCCESS) {
		goto attach_fail9;
	}

	/*
	 * get MAC address of this chipset
	 */
	iwp_get_mac_from_eep(sc);


	/*
	 * initialize TX and RX ring buffers
	 */
	err = iwp_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail9;
	}

	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;

	/*
	 * copy ucode to dma buffer
	 */
	err = iwp_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail10;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * 802.11 module
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype  = IEEE80211_T_OFDM;
	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state    = IEEE80211_S_INIT;
	ic->ic_maxrssi  = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/*
	 * set supported .11b and .11g rates
	 */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;

	/*
	 * set supported .11b and .11g channels (1 through 11)
	 */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
	ic->ic_xmit = iwp_send;

	/*
	 * attach to 802.11 module
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Overwrite 80211 default configurations.
	 */
	iwp_overwrite_ic_default(sc);

	/*
	 * initialize 802.11 module
	 */
	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwp_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail12;
	}

	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail13;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail14;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	/*
	 * create relation to GLD
	 */
	macp = mac_alloc(MAC_VERSION);
	if (NULL == macp) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_alloc()\n");
		goto attach_fail15;
	}

	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver		= sc;
	macp->m_dip		= dip;
	macp->m_src_addr	= ic->ic_macaddr;
	macp->m_callbacks	= &iwp_m_callbacks;
	macp->m_min_sdu		= 0;
	macp->m_max_sdu		= IEEE80211_MTU;
	macp->m_pdata		= &wd;
	macp->m_pdata_size	= sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_register()\n");
		goto attach_fail15;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS) {
		/*
		 * a missing minor node is tolerated; attach proceeds
		 */
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do ddi_create_minor_node()\n");
	}

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (NULL == sc->sc_mf_thread) {
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
	}

	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);

	return (DDI_SUCCESS);

	/*
	 * cleanup: each label releases the resource acquired just
	 * before the corresponding goto, then falls through to the
	 * earlier labels so everything unwinds in reverse order.
	 */
attach_fail15:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail14:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail13:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail12:
	ieee80211_detach(ic);
attach_fail11:
	iwp_free_fw_dma(sc);
attach_fail10:
	iwp_ring_free(sc);
attach_fail9:
	iwp_free_kw(sc);
attach_fail8:
	iwp_free_shared(sc);
attach_fail7:
	iwp_destroy_locks(sc);
attach_fail6:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail5:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail4:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail3:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwp_soft_state_p, instance);
attach_fail1:
	return (DDI_FAILURE);
}
905 
/*
 * detach(9E) entry point.
 *
 * DDI_DETACH: stops the monitor thread, disables GLDv3 access,
 * stops the chipset and releases every resource acquired in
 * iwp_attach() in reverse order.  DDI_SUSPEND: stops the hardware
 * and marks the instance suspended for a later DDI_RESUME.
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t	*ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		/* the thread clears sc_mf_thread and signals on exit */
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1007 
1008 /*
1009  * destroy all locks
1010  */
1011 static void
1012 iwp_destroy_locks(iwp_sc_t *sc)
1013 {
1014 	cv_destroy(&sc->sc_mt_cv);
1015 	cv_destroy(&sc->sc_cmd_cv);
1016 	cv_destroy(&sc->sc_put_seg_cv);
1017 	cv_destroy(&sc->sc_ucode_cv);
1018 	mutex_destroy(&sc->sc_mt_lock);
1019 	mutex_destroy(&sc->sc_tx_lock);
1020 	mutex_destroy(&sc->sc_glock);
1021 }
1022 
/*
 * Allocate an area of memory and a DMA handle for accessing it
 *
 * Performs the standard three-step DDI DMA setup: allocate a handle,
 * allocate DMAable memory, then bind the two together.  On failure of
 * any step, everything acquired so far is released and the handles in
 * *dma_p are NULLed so iwp_free_dma_mem() is safe afterwards.
 * Returns DDI_SUCCESS or DDI_FAILURE.  All callers pass attributes
 * with a single segment, so one cookie describes the whole mapping.
 */
static int
iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
    ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
    uint_t dma_flags, iwp_dma_t *dma_p)
{
	caddr_t vaddr;
	int err = DDI_FAILURE;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/* sentinel values; callers overwrite the fields they use */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
1078 
/*
 * Free one allocated area of DMAable memory
 *
 * Safe on a partially set-up iwp_dma_t: each teardown step is
 * guarded, and handles are NULLed afterwards so repeated calls are
 * harmless.
 */
static void
iwp_free_dma_mem(iwp_dma_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			/* unbind must happen before the handle is freed */
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
1099 
1100 /*
1101  * copy ucode into dma buffers
1102  */
static int
iwp_alloc_fw_dma(iwp_sc_t *sc)
{
	int err = DDI_FAILURE;
	iwp_dma_t *dma_p;
	char *t;

	/*
	 * firmware image layout:
	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
	 *
	 * The cursor 't' below walks through the image section by
	 * section; each section is copied into its own DMA buffer so
	 * the device can be programmed from it later (see
	 * iwp_load_init_firmware()/iwp_load_run_firmware()).
	 */

	/*
	 * Check firmware image size.
	 * Reject any section larger than the RTC_INST_SIZE/RTC_DATA_SIZE
	 * limits before allocating anything.
	 */
	if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_datasz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->datasz));

		goto fail;
	}

	/*
	 * copy text of runtime ucode
	 * (the image payload begins immediately after the header)
	 */
	t = (char *)(sc->sc_hdr + 1);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate text dma memory.\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "text[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));

	/*
	 * copy data and bak-data of runtime ucode
	 *
	 * Two identical copies of the data section are kept; note that
	 * iwp_load_run_firmware() programs the device from the backup
	 * copy (sc_dma_fw_data_bak).
	 */
	t += LE_32(sc->sc_hdr->textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data bakup dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data_bak;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * copy text of init ucode
	 */
	t += LE_32(sc->sc_hdr->datasz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init text dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	/*
	 * copy data of init ucode
	 */
	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	/* remember where the boot section begins in the raw image */
	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	/*
	 * NOTE(review): on failure, buffers already allocated are NOT
	 * freed here; presumably the caller invokes iwp_free_fw_dma()
	 * on error -- confirm against the attach path.
	 */
	return (err);
}
1268 
1269 static void
1270 iwp_free_fw_dma(iwp_sc_t *sc)
1271 {
1272 	iwp_free_dma_mem(&sc->sc_dma_fw_text);
1273 	iwp_free_dma_mem(&sc->sc_dma_fw_data);
1274 	iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1275 	iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1276 	iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1277 }
1278 
1279 /*
1280  * Allocate a shared buffer between host and NIC.
1281  */
1282 static int
1283 iwp_alloc_shared(iwp_sc_t *sc)
1284 {
1285 #ifdef	DEBUG
1286 	iwp_dma_t *dma_p;
1287 #endif
1288 	int err = DDI_FAILURE;
1289 
1290 	/*
1291 	 * must be aligned on a 4K-page boundary
1292 	 */
1293 	err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
1294 	    &sh_dma_attr, &iwp_dma_descattr,
1295 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1296 	    &sc->sc_dma_sh);
1297 	if (err != DDI_SUCCESS) {
1298 		goto fail;
1299 	}
1300 
1301 	sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;
1302 
1303 #ifdef	DEBUG
1304 	dma_p = &sc->sc_dma_sh;
1305 #endif
1306 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
1307 	    "sh[ncookies:%d addr:%lx size:%lx]\n",
1308 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1309 	    dma_p->cookie.dmac_size));
1310 
1311 	return (err);
1312 fail:
1313 	iwp_free_shared(sc);
1314 	return (err);
1315 }
1316 
static void
iwp_free_shared(iwp_sc_t *sc)
{
	/* release the host/NIC shared area set up by iwp_alloc_shared() */
	iwp_free_dma_mem(&sc->sc_dma_sh);
}
1322 
1323 /*
1324  * Allocate a keep warm page.
1325  */
1326 static int
1327 iwp_alloc_kw(iwp_sc_t *sc)
1328 {
1329 #ifdef	DEBUG
1330 	iwp_dma_t *dma_p;
1331 #endif
1332 	int err = DDI_FAILURE;
1333 
1334 	/*
1335 	 * must be aligned on a 4K-page boundary
1336 	 */
1337 	err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
1338 	    &kw_dma_attr, &iwp_dma_descattr,
1339 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1340 	    &sc->sc_dma_kw);
1341 	if (err != DDI_SUCCESS) {
1342 		goto fail;
1343 	}
1344 
1345 #ifdef	DEBUG
1346 	dma_p = &sc->sc_dma_kw;
1347 #endif
1348 	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
1349 	    "kw[ncookies:%d addr:%lx size:%lx]\n",
1350 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1351 	    dma_p->cookie.dmac_size));
1352 
1353 	return (err);
1354 fail:
1355 	iwp_free_kw(sc);
1356 	return (err);
1357 }
1358 
static void
iwp_free_kw(iwp_sc_t *sc)
{
	/* release the keep-warm page set up by iwp_alloc_kw() */
	iwp_free_dma_mem(&sc->sc_dma_kw);
}
1364 
1365 /*
1366  * initialize RX ring buffers
1367  */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer
	 * (one 32-bit descriptor slot per RX queue entry)
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef	DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers.
	 * Each descriptor slot is filled with the buffer's device
	 * address shifted per the hardware's addressing scheme below.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef	DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* hand the filled descriptor ring to the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	/* release whatever was allocated before the failure */
	iwp_free_rx_ring(sc);
	return (err);
}
1444 
1445 /*
1446  * disable RX ring
1447  */
1448 static void
1449 iwp_reset_rx_ring(iwp_sc_t *sc)
1450 {
1451 	int n;
1452 
1453 	iwp_mac_access_enter(sc);
1454 	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1455 	for (n = 0; n < 2000; n++) {
1456 		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1457 			break;
1458 		}
1459 		DELAY(1000);
1460 	}
1461 #ifdef DEBUG
1462 	if (2000 == n) {
1463 		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
1464 		    "timeout resetting Rx ring\n"));
1465 	}
1466 #endif
1467 	iwp_mac_access_exit(sc);
1468 
1469 	sc->sc_rxq.cur = 0;
1470 }
1471 
1472 static void
1473 iwp_free_rx_ring(iwp_sc_t *sc)
1474 {
1475 	int i;
1476 
1477 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1478 		if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1479 			IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1480 			    DDI_DMA_SYNC_FORCPU);
1481 		}
1482 
1483 		iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1484 	}
1485 
1486 	if (sc->sc_rxq.dma_desc.dma_hdl) {
1487 		IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1488 	}
1489 
1490 	iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
1491 }
1492 
1493 /*
1494  * initialize TX ring buffers
1495  */
static int
iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
    int slots, int qid)
{
	iwp_tx_data_t *data;
	iwp_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwp_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
#ifdef	DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;
	/*
	 * ring->count is set before any allocation so that
	 * iwp_free_tx_ring() in the fail path can walk the
	 * right number of slots.
	 */
	ring->qid = qid;
	ring->count = TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;

	/*
	 * allocate buffer for TX descriptor ring
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring desc[%d] "
		    "failed\n", qid));
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/*
	 * allocate buffer for ucode command
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
	    &cmd_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring cmd[%d]"
		    " failed\n", qid));
		goto fail;
	}

#ifdef	DEBUG
	dma_p = &ring->dma_cmd;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx frame buffers.
	 * KM_NOSLEEP: allocation may fail and is checked below.
	 */
	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (NULL == ring->data) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "could not allocate "
		    "tx data slots\n"));
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
			    "dma alloc tx "
			    "ring buf[%d] failed\n", i));
			goto fail;
		}

		/*
		 * each slot gets a pointer into the descriptor and
		 * command arrays plus the matching device addresses,
		 * derived from the base physical addresses above.
		 */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h +  i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
#ifdef	DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	/* release everything allocated so far for this ring */
	iwp_free_tx_ring(ring);

	return (err);
}
1616 
1617 /*
1618  * disable TX ring
1619  */
1620 static void
1621 iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
1622 {
1623 	iwp_tx_data_t *data;
1624 	int i, n;
1625 
1626 	iwp_mac_access_enter(sc);
1627 
1628 	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1629 	for (n = 0; n < 200; n++) {
1630 		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
1631 		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1632 			break;
1633 		}
1634 		DELAY(10);
1635 	}
1636 
1637 #ifdef	DEBUG
1638 	if (200 == n) {
1639 		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
1640 		    "timeout reset tx ring %d\n",
1641 		    ring->qid));
1642 	}
1643 #endif
1644 
1645 	iwp_mac_access_exit(sc);
1646 
1647 	/* by pass, if it's quiesce */
1648 	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
1649 		for (i = 0; i < ring->count; i++) {
1650 			data = &ring->data[i];
1651 			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1652 		}
1653 	}
1654 
1655 	ring->queued = 0;
1656 	ring->cur = 0;
1657 	ring->desc_cur = 0;
1658 }
1659 
1660 static void
1661 iwp_free_tx_ring(iwp_tx_ring_t *ring)
1662 {
1663 	int i;
1664 
1665 	if (ring->dma_desc.dma_hdl != NULL) {
1666 		IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1667 	}
1668 	iwp_free_dma_mem(&ring->dma_desc);
1669 
1670 	if (ring->dma_cmd.dma_hdl != NULL) {
1671 		IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1672 	}
1673 	iwp_free_dma_mem(&ring->dma_cmd);
1674 
1675 	if (ring->data != NULL) {
1676 		for (i = 0; i < ring->count; i++) {
1677 			if (ring->data[i].dma_data.dma_hdl) {
1678 				IWP_DMA_SYNC(ring->data[i].dma_data,
1679 				    DDI_DMA_SYNC_FORDEV);
1680 			}
1681 			iwp_free_dma_mem(&ring->data[i].dma_data);
1682 		}
1683 		kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
1684 	}
1685 }
1686 
1687 /*
1688  * initialize TX and RX ring
1689  */
1690 static int
1691 iwp_ring_init(iwp_sc_t *sc)
1692 {
1693 	int i, err = DDI_FAILURE;
1694 
1695 	for (i = 0; i < IWP_NUM_QUEUES; i++) {
1696 		if (IWP_CMD_QUEUE_NUM == i) {
1697 			continue;
1698 		}
1699 
1700 		err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1701 		    i);
1702 		if (err != DDI_SUCCESS) {
1703 			goto fail;
1704 		}
1705 	}
1706 
1707 	/*
1708 	 * initialize command queue
1709 	 */
1710 	err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
1711 	    TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
1712 	if (err != DDI_SUCCESS) {
1713 		goto fail;
1714 	}
1715 
1716 	err = iwp_alloc_rx_ring(sc);
1717 	if (err != DDI_SUCCESS) {
1718 		goto fail;
1719 	}
1720 
1721 fail:
1722 	return (err);
1723 }
1724 
1725 static void
1726 iwp_ring_free(iwp_sc_t *sc)
1727 {
1728 	int i = IWP_NUM_QUEUES;
1729 
1730 	iwp_free_rx_ring(sc);
1731 	while (--i >= 0) {
1732 		iwp_free_tx_ring(&sc->sc_txq[i]);
1733 	}
1734 }
1735 
1736 /* ARGSUSED */
1737 static ieee80211_node_t *
1738 iwp_node_alloc(ieee80211com_t *ic)
1739 {
1740 	iwp_amrr_t *amrr;
1741 
1742 	amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1743 	if (NULL == amrr) {
1744 		cmn_err(CE_WARN, "iwp_node_alloc(): "
1745 		    "failed to allocate memory for amrr structure\n");
1746 		return (NULL);
1747 	}
1748 
1749 	iwp_amrr_init(amrr);
1750 
1751 	return (&amrr->in);
1752 }
1753 
1754 static void
1755 iwp_node_free(ieee80211_node_t *in)
1756 {
1757 	ieee80211com_t *ic;
1758 
1759 	if ((NULL == in) ||
1760 	    (NULL == in->in_ic)) {
1761 		cmn_err(CE_WARN, "iwp_node_free() "
1762 		    "Got a NULL point from Net80211 module\n");
1763 		return;
1764 	}
1765 	ic = in->in_ic;
1766 
1767 	if (ic->ic_node_cleanup != NULL) {
1768 		ic->ic_node_cleanup(in);
1769 	}
1770 
1771 	if (in->in_wpa_ie != NULL) {
1772 		ieee80211_free(in->in_wpa_ie);
1773 	}
1774 
1775 	if (in->in_wme_ie != NULL) {
1776 		ieee80211_free(in->in_wme_ie);
1777 	}
1778 
1779 	if (in->in_htcap_ie != NULL) {
1780 		ieee80211_free(in->in_htcap_ie);
1781 	}
1782 
1783 	kmem_free(in, sizeof (iwp_amrr_t));
1784 }
1785 
1786 
1787 /*
1788  * change station's state. this function will be invoked by 80211 module
1789  * when need to change staton's state.
1790  */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	/* defensive: reject a NULL ic from the net80211 framework */
	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	/* hold the driver lock across the hardware reconfiguration below */
	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			/* push the cleared RXON configuration to firmware */
			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * drop the lock around the net80211 callback,
			 * presumably to avoid holding sc_glock across
			 * framework code -- then retake it for iwp_scan().
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			/* SCAN->SCAN returns here; skip the common tail */
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value
			 * (highest rate not above 36 Mbps, i.e. 72 half-Mbps
			 * units)
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	/* finally let net80211 run its own state processing, lock dropped */
	return (sc->sc_newstate(ic, nstate, arg));
}
1966 
1967 /*
1968  * exclusive access to mac begin.
1969  */
1970 static void
1971 iwp_mac_access_enter(iwp_sc_t *sc)
1972 {
1973 	uint32_t tmp;
1974 	int n;
1975 
1976 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
1977 	IWP_WRITE(sc, CSR_GP_CNTRL,
1978 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1979 
1980 	/* wait until we succeed */
1981 	for (n = 0; n < 1000; n++) {
1982 		if ((IWP_READ(sc, CSR_GP_CNTRL) &
1983 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1984 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1985 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
1986 			break;
1987 		}
1988 		DELAY(10);
1989 	}
1990 
1991 #ifdef	DEBUG
1992 	if (1000 == n) {
1993 		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
1994 		    "could not lock memory\n"));
1995 	}
1996 #endif
1997 }
1998 
1999 /*
2000  * exclusive access to mac end.
2001  */
static void
iwp_mac_access_exit(iwp_sc_t *sc)
{
	/* clear the access-request flag set by iwp_mac_access_enter() */
	uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
2009 
2010 /*
2011  * this function defined here for future use.
2012  * static uint32_t
2013  * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
2014  * {
2015  * 	IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2016  * 	return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
2017  * }
2018  */
2019 
2020 /*
2021  * write mac memory
2022  */
static void
iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	/* indirect write: latch the target address, then the data word */
	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
2029 
2030 /*
2031  * read mac register
2032  */
static uint32_t
iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
{
	/*
	 * indirect read of a peripheral register: latch the address,
	 * then read the data port.  NOTE(review): the (3 << 24) bits
	 * in the address word presumably encode the access width --
	 * confirm against the hardware reference.
	 */
	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
}
2039 
2040 /*
2041  * write mac register
2042  */
static void
iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	/*
	 * indirect write of a peripheral register; address encoding
	 * mirrors iwp_reg_read() above.
	 */
	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
2049 
2050 
2051 /*
2052  * steps of loading ucode:
2053  * load init ucode=>init alive=>calibrate=>
2054  * receive calibration result=>reinitialize NIC=>
2055  * load runtime ucode=>runtime alive=>
2056  * send calibration result=>running.
2057  */
static int
iwp_load_init_firmware(iwp_sc_t *sc)
{
	int	err = IWP_FAIL;
	clock_t	clk;

	/*
	 * IWP_F_PUT_SEG is set by the interrupt path when the device
	 * acknowledges a segment upload; clear it before each transfer
	 * and wait on sc_put_seg_cv for it to be raised again.
	 * Caller is expected to hold sc_glock (cv_timedwait below
	 * drops and retakes it).
	 */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init uCode.\n");
		return (err);
	}

	/* absolute deadline: one second from now */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading init_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_data section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading init_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2126 
static int
iwp_load_run_firmware(iwp_sc_t *sc)
{
	int	err = IWP_FAIL;
	clock_t	clk;

	/*
	 * Same upload/acknowledge handshake as iwp_load_init_firmware():
	 * IWP_F_PUT_SEG is raised by the interrupt path when the device
	 * confirms a segment; sc_glock is expected to be held here.
	 */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load text section of runtime uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run uCode.\n");
		return (err);
	}

	/* absolute deadline: one second from now */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading run_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load run_data section of uCode to hardware
	 * (from the backup copy; it has the same size as sc_dma_fw_data
	 * -- both were allocated with the header's datasz)
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading run_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2195 
2196 /*
2197  * this function will be invoked to receive phy information
2198  * when a frame is received.
2199  */
static void
iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{

	/* mark that a phy result is pending for the next MPDU */
	sc->sc_rx_phy_res.flag = 1;

	/*
	 * stash the phy payload that follows the descriptor;
	 * iwp_rx_mpdu_intr() consumes it when the frame body arrives.
	 * assumes sc_rx_phy_res.buf is at least
	 * sizeof (iwp_rx_phy_res_t) bytes -- TODO confirm.
	 */
	(void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
	    sizeof (iwp_rx_phy_res_t));
}
2209 
2210 /*
2211  * this function will be invoked to receive body of frame when
2212  * a frame is received.
2213  */
2214 static void
2215 iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2216 {
2217 	ieee80211com_t	*ic = &sc->sc_ic;
2218 #ifdef	DEBUG
2219 	iwp_rx_ring_t	*ring = &sc->sc_rxq;
2220 #endif
2221 	struct ieee80211_frame		*wh;
2222 	struct iwp_rx_non_cfg_phy	*phyinfo;
2223 	struct	iwp_rx_mpdu_body_size	*mpdu_size;
2224 
2225 	mblk_t			*mp;
2226 	int16_t			t;
2227 	uint16_t		len, rssi, agc;
2228 	uint32_t		temp, crc, *tail;
2229 	uint32_t		arssi, brssi, crssi, mrssi;
2230 	iwp_rx_phy_res_t	*stat;
2231 	ieee80211_node_t	*in;
2232 
2233 	/*
2234 	 * assuming not 11n here. cope with 11n in phase-II
2235 	 */
2236 	mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
2237 	stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
2238 	if (stat->cfg_phy_cnt > 20) {
2239 		return;
2240 	}
2241 
2242 	phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
2243 	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
2244 	agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;
2245 
2246 	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
2247 	arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
2248 	brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;
2249 
2250 	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
2251 	crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;
2252 
2253 	mrssi = MAX(arssi, brssi);
2254 	mrssi = MAX(mrssi, crssi);
2255 
2256 	t = mrssi - agc - IWP_RSSI_OFFSET;
2257 	/*
2258 	 * convert dBm to percentage
2259 	 */
2260 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
2261 	    / (75 * 75);
2262 	if (rssi > 100) {
2263 		rssi = 100;
2264 	}
2265 	if (rssi < 1) {
2266 		rssi = 1;
2267 	}
2268 
2269 	/*
2270 	 * size of frame, not include FCS
2271 	 */
2272 	len = LE_16(mpdu_size->byte_count);
2273 	tail = (uint32_t *)((uint8_t *)(desc + 1) +
2274 	    sizeof (struct iwp_rx_mpdu_body_size) + len);
2275 	bcopy(tail, &crc, 4);
2276 
2277 	IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2278 	    "rx intr: idx=%d phy_len=%x len=%d "
2279 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2280 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2281 	    len, stat->rate.r.s.rate, stat->channel,
2282 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2283 	    stat->cfg_phy_cnt, LE_32(crc)));
2284 
2285 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2286 		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2287 		    "rx frame oversize\n"));
2288 		return;
2289 	}
2290 
2291 	/*
2292 	 * discard Rx frames with bad CRC
2293 	 */
2294 	if ((LE_32(crc) &
2295 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2296 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2297 		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2298 		    "rx crc error tail: %x\n",
2299 		    LE_32(crc)));
2300 		sc->sc_rx_err++;
2301 		return;
2302 	}
2303 
2304 	wh = (struct ieee80211_frame *)
2305 	    ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));
2306 
2307 	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
2308 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2309 		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2310 		    "rx : association id = %x\n",
2311 		    sc->sc_assoc_id));
2312 	}
2313 
2314 #ifdef DEBUG
2315 	if (iwp_dbg_flags & IWP_DEBUG_RX) {
2316 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2317 	}
2318 #endif
2319 
2320 	in = ieee80211_find_rxnode(ic, wh);
2321 	mp = allocb(len, BPRI_MED);
2322 	if (mp) {
2323 		(void) memcpy(mp->b_wptr, wh, len);
2324 		mp->b_wptr += len;
2325 
2326 		/*
2327 		 * send the frame to the 802.11 layer
2328 		 */
2329 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2330 	} else {
2331 		sc->sc_rx_nobuf++;
2332 		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2333 		    "alloc rx buf failed\n"));
2334 	}
2335 
2336 	/*
2337 	 * release node reference
2338 	 */
2339 	ieee80211_free_node(in);
2340 }
2341 
2342 /*
2343  * process correlative affairs after a frame is sent.
2344  */
2345 static void
2346 iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2347 {
2348 	ieee80211com_t *ic = &sc->sc_ic;
2349 	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2350 	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
2351 	iwp_amrr_t *amrr;
2352 
2353 	if (NULL == ic->ic_bss) {
2354 		return;
2355 	}
2356 
2357 	amrr = (iwp_amrr_t *)ic->ic_bss;
2358 
2359 	amrr->txcnt++;
2360 	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
2361 	    "tx: %d cnt\n", amrr->txcnt));
2362 
2363 	if (stat->ntries > 0) {
2364 		amrr->retrycnt++;
2365 		sc->sc_tx_retries++;
2366 		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
2367 		    "tx: %d retries\n",
2368 		    sc->sc_tx_retries));
2369 	}
2370 
2371 	mutex_enter(&sc->sc_mt_lock);
2372 	sc->sc_tx_timer = 0;
2373 	mutex_exit(&sc->sc_mt_lock);
2374 
2375 	mutex_enter(&sc->sc_tx_lock);
2376 
2377 	ring->queued--;
2378 	if (ring->queued < 0) {
2379 		ring->queued = 0;
2380 	}
2381 
2382 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2383 		sc->sc_need_reschedule = 0;
2384 		mutex_exit(&sc->sc_tx_lock);
2385 		mac_tx_update(ic->ic_mach);
2386 		mutex_enter(&sc->sc_tx_lock);
2387 	}
2388 
2389 	mutex_exit(&sc->sc_tx_lock);
2390 }
2391 
2392 /*
2393  * inform a given command has been executed
2394  */
2395 static void
2396 iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2397 {
2398 	if ((desc->hdr.qid & 7) != 4) {
2399 		return;
2400 	}
2401 
2402 	if (sc->sc_cmd_accum > 0) {
2403 		sc->sc_cmd_accum--;
2404 		return;
2405 	}
2406 
2407 	mutex_enter(&sc->sc_glock);
2408 
2409 	sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2410 
2411 	cv_signal(&sc->sc_cmd_cv);
2412 
2413 	mutex_exit(&sc->sc_glock);
2414 
2415 	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
2416 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2417 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2418 	    desc->hdr.type));
2419 }
2420 
2421 /*
2422  * this function will be invoked when alive notification occur.
2423  */
2424 static void
2425 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2426 {
2427 	uint32_t rv;
2428 	struct iwp_calib_cfg_cmd cmd;
2429 	struct iwp_alive_resp *ar =
2430 	    (struct iwp_alive_resp *)(desc + 1);
2431 	struct iwp_calib_results *res_p = &sc->sc_calib_results;
2432 
2433 	/*
2434 	 * the microcontroller is ready
2435 	 */
2436 	IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2437 	    "microcode alive notification minor: %x major: %x type: "
2438 	    "%x subtype: %x\n",
2439 	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2440 
2441 #ifdef	DEBUG
2442 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2443 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2444 		    "microcontroller initialization failed\n"));
2445 	}
2446 #endif
2447 
2448 	/*
2449 	 * determine if init alive or runtime alive.
2450 	 */
2451 	if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2452 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2453 		    "initialization alive received.\n"));
2454 
2455 		(void) memcpy(&sc->sc_card_alive_init, ar,
2456 		    sizeof (struct iwp_init_alive_resp));
2457 
2458 		/*
2459 		 * necessary configuration to NIC
2460 		 */
2461 		mutex_enter(&sc->sc_glock);
2462 
2463 		rv = iwp_alive_common(sc);
2464 		if (rv != IWP_SUCCESS) {
2465 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2466 			    "common alive process failed in init alive.\n");
2467 			mutex_exit(&sc->sc_glock);
2468 			return;
2469 		}
2470 
2471 		(void) memset(&cmd, 0, sizeof (cmd));
2472 
2473 		cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
2474 		cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
2475 		cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
2476 		cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;
2477 
2478 		/*
2479 		 * require ucode execute calibration
2480 		 */
2481 		rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2482 		if (rv != IWP_SUCCESS) {
2483 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2484 			    "failed to send calibration configure command.\n");
2485 			mutex_exit(&sc->sc_glock);
2486 			return;
2487 		}
2488 
2489 		mutex_exit(&sc->sc_glock);
2490 
2491 	} else {	/* runtime alive */
2492 
2493 		IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2494 		    "runtime alive received.\n"));
2495 
2496 		(void) memcpy(&sc->sc_card_alive_run, ar,
2497 		    sizeof (struct iwp_alive_resp));
2498 
2499 		mutex_enter(&sc->sc_glock);
2500 
2501 		/*
2502 		 * necessary configuration to NIC
2503 		 */
2504 		rv = iwp_alive_common(sc);
2505 		if (rv != IWP_SUCCESS) {
2506 			cmn_err(CE_WARN, "iwp_ucode_alive(): "
2507 			    "common alive process failed in run alive.\n");
2508 			mutex_exit(&sc->sc_glock);
2509 			return;
2510 		}
2511 
2512 		/*
2513 		 * send the result of local oscilator calibration to uCode.
2514 		 */
2515 		if (res_p->lo_res != NULL) {
2516 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2517 			    res_p->lo_res, res_p->lo_res_len, 1);
2518 			if (rv != IWP_SUCCESS) {
2519 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2520 				    "failed to send local"
2521 				    "oscilator calibration command.\n");
2522 				mutex_exit(&sc->sc_glock);
2523 				return;
2524 			}
2525 
2526 			DELAY(1000);
2527 		}
2528 
2529 		/*
2530 		 * send the result of TX IQ calibration to uCode.
2531 		 */
2532 		if (res_p->tx_iq_res != NULL) {
2533 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2534 			    res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2535 			if (rv != IWP_SUCCESS) {
2536 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2537 				    "failed to send TX IQ"
2538 				    "calibration command.\n");
2539 				mutex_exit(&sc->sc_glock);
2540 				return;
2541 			}
2542 
2543 			DELAY(1000);
2544 		}
2545 
2546 		/*
2547 		 * send the result of TX IQ perd calibration to uCode.
2548 		 */
2549 		if (res_p->tx_iq_perd_res != NULL) {
2550 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2551 			    res_p->tx_iq_perd_res,
2552 			    res_p->tx_iq_perd_res_len, 1);
2553 			if (rv != IWP_SUCCESS) {
2554 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2555 				    "failed to send TX IQ perd"
2556 				    "calibration command.\n");
2557 				mutex_exit(&sc->sc_glock);
2558 				return;
2559 			}
2560 
2561 			DELAY(1000);
2562 		}
2563 
2564 		/*
2565 		 * send the result of Base Band calibration to uCode.
2566 		 */
2567 		if (res_p->base_band_res != NULL) {
2568 			rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2569 			    res_p->base_band_res,
2570 			    res_p->base_band_res_len, 1);
2571 			if (rv != IWP_SUCCESS) {
2572 				cmn_err(CE_WARN, "iwp_ucode_alive(): "
2573 				    "failed to send Base Band"
2574 				    "calibration command.\n");
2575 				mutex_exit(&sc->sc_glock);
2576 				return;
2577 			}
2578 
2579 			DELAY(1000);
2580 		}
2581 
2582 		atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2583 		cv_signal(&sc->sc_ucode_cv);
2584 
2585 		mutex_exit(&sc->sc_glock);
2586 	}
2587 
2588 }
2589 
2590 /*
2591  * deal with receiving frames, command response
2592  * and all notifications from ucode.
2593  */
2594 /* ARGSUSED */
2595 static uint_t
2596 iwp_rx_softintr(caddr_t arg, caddr_t unused)
2597 {
2598 	iwp_sc_t *sc;
2599 	ieee80211com_t *ic;
2600 	iwp_rx_desc_t *desc;
2601 	iwp_rx_data_t *data;
2602 	uint32_t index;
2603 
2604 	if (NULL == arg) {
2605 		return (DDI_INTR_UNCLAIMED);
2606 	}
2607 	sc = (iwp_sc_t *)arg;
2608 	ic = &sc->sc_ic;
2609 
2610 	/*
2611 	 * firmware has moved the index of the rx queue, driver get it,
2612 	 * and deal with it.
2613 	 */
2614 	index = (sc->sc_shared->val0) & 0xfff;
2615 
2616 	while (sc->sc_rxq.cur != index) {
2617 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2618 		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;
2619 
2620 		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
2621 		    "rx notification index = %d"
2622 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2623 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2624 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2625 
2626 		/*
2627 		 * a command other than a tx need to be replied
2628 		 */
2629 		if (!(desc->hdr.qid & 0x80) &&
2630 		    (desc->hdr.type != REPLY_SCAN_CMD) &&
2631 		    (desc->hdr.type != REPLY_TX)) {
2632 			iwp_cmd_intr(sc, desc);
2633 		}
2634 
2635 		switch (desc->hdr.type) {
2636 		case REPLY_RX_PHY_CMD:
2637 			iwp_rx_phy_intr(sc, desc);
2638 			break;
2639 
2640 		case REPLY_RX_MPDU_CMD:
2641 			iwp_rx_mpdu_intr(sc, desc);
2642 			break;
2643 
2644 		case REPLY_TX:
2645 			iwp_tx_intr(sc, desc);
2646 			break;
2647 
2648 		case REPLY_ALIVE:
2649 			iwp_ucode_alive(sc, desc);
2650 			break;
2651 
2652 		case CARD_STATE_NOTIFICATION:
2653 		{
2654 			uint32_t *status = (uint32_t *)(desc + 1);
2655 
2656 			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
2657 			    "state changed to %x\n",
2658 			    LE_32(*status)));
2659 
2660 			if (LE_32(*status) & 1) {
2661 				/*
2662 				 * the radio button has to be pushed(OFF). It
2663 				 * is considered as a hw error, the
2664 				 * iwp_thread() tries to recover it after the
2665 				 * button is pushed again(ON)
2666 				 */
2667 				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
2668 				    "radio transmitter is off\n");
2669 				sc->sc_ostate = sc->sc_ic.ic_state;
2670 				ieee80211_new_state(&sc->sc_ic,
2671 				    IEEE80211_S_INIT, -1);
2672 				atomic_or_32(&sc->sc_flags,
2673 				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
2674 			}
2675 
2676 			break;
2677 		}
2678 
2679 		case SCAN_START_NOTIFICATION:
2680 		{
2681 			iwp_start_scan_t *scan =
2682 			    (iwp_start_scan_t *)(desc + 1);
2683 
2684 			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
2685 			    "scanning channel %d status %x\n",
2686 			    scan->chan, LE_32(scan->status)));
2687 
2688 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2689 			break;
2690 		}
2691 
2692 		case SCAN_COMPLETE_NOTIFICATION:
2693 		{
2694 #ifdef	DEBUG
2695 			iwp_stop_scan_t *scan =
2696 			    (iwp_stop_scan_t *)(desc + 1);
2697 
2698 			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
2699 			    "completed channel %d (burst of %d) status %02x\n",
2700 			    scan->chan, scan->nchan, scan->status));
2701 #endif
2702 
2703 			sc->sc_scan_pending++;
2704 			break;
2705 		}
2706 
2707 		case STATISTICS_NOTIFICATION:
2708 		{
2709 			/*
2710 			 * handle statistics notification
2711 			 */
2712 			break;
2713 		}
2714 
2715 		case CALIBRATION_RES_NOTIFICATION:
2716 			iwp_save_calib_result(sc, desc);
2717 			break;
2718 
2719 		case CALIBRATION_COMPLETE_NOTIFICATION:
2720 			mutex_enter(&sc->sc_glock);
2721 			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2722 			cv_signal(&sc->sc_ucode_cv);
2723 			mutex_exit(&sc->sc_glock);
2724 			break;
2725 
2726 		case MISSED_BEACONS_NOTIFICATION:
2727 		{
2728 			struct iwp_beacon_missed *miss =
2729 			    (struct iwp_beacon_missed *)(desc + 1);
2730 
2731 			if ((ic->ic_state == IEEE80211_S_RUN) &&
2732 			    (LE_32(miss->consecutive) > 50)) {
2733 				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
2734 				    "beacon missed %d/%d\n",
2735 				    LE_32(miss->consecutive),
2736 				    LE_32(miss->total));
2737 				(void) ieee80211_new_state(ic,
2738 				    IEEE80211_S_INIT, -1);
2739 			}
2740 			break;
2741 		}
2742 		}
2743 
2744 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2745 	}
2746 
2747 	/*
2748 	 * driver dealt with what received in rx queue and tell the information
2749 	 * to the firmware.
2750 	 */
2751 	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
2752 	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2753 
2754 	/*
2755 	 * re-enable interrupts
2756 	 */
2757 	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2758 
2759 	return (DDI_INTR_CLAIMED);
2760 }
2761 
2762 /*
2763  * the handle of interrupt
2764  */
2765 /* ARGSUSED */
2766 static uint_t
2767 iwp_intr(caddr_t arg, caddr_t unused)
2768 {
2769 	iwp_sc_t *sc;
2770 	uint32_t r, rfh;
2771 
2772 	if (NULL == arg) {
2773 		return (DDI_INTR_UNCLAIMED);
2774 	}
2775 	sc = (iwp_sc_t *)arg;
2776 
2777 	r = IWP_READ(sc, CSR_INT);
2778 	if (0 == r || 0xffffffff == r) {
2779 		return (DDI_INTR_UNCLAIMED);
2780 	}
2781 
2782 	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
2783 	    "interrupt reg %x\n", r));
2784 
2785 	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);
2786 
2787 	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
2788 	    "FH interrupt reg %x\n", rfh));
2789 
2790 	/*
2791 	 * disable interrupts
2792 	 */
2793 	IWP_WRITE(sc, CSR_INT_MASK, 0);
2794 
2795 	/*
2796 	 * ack interrupts
2797 	 */
2798 	IWP_WRITE(sc, CSR_INT, r);
2799 	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2800 
2801 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2802 		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
2803 		    "fatal firmware error\n"));
2804 		iwp_stop(sc);
2805 		sc->sc_ostate = sc->sc_ic.ic_state;
2806 
2807 		/* notify upper layer */
2808 		if (!IWP_CHK_FAST_RECOVER(sc)) {
2809 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2810 		}
2811 
2812 		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
2813 		return (DDI_INTR_CLAIMED);
2814 	}
2815 
2816 	if (r & BIT_INT_RF_KILL) {
2817 		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
2818 		if (tmp & (1 << 27)) {
2819 			cmn_err(CE_NOTE, "RF switch: radio on\n");
2820 		}
2821 	}
2822 
2823 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2824 	    (rfh & FH_INT_RX_MASK)) {
2825 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2826 		return (DDI_INTR_CLAIMED);
2827 	}
2828 
2829 	if (r & BIT_INT_FH_TX) {
2830 		mutex_enter(&sc->sc_glock);
2831 		atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
2832 		cv_signal(&sc->sc_put_seg_cv);
2833 		mutex_exit(&sc->sc_glock);
2834 	}
2835 
2836 #ifdef	DEBUG
2837 	if (r & BIT_INT_ALIVE)	{
2838 		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
2839 		    "firmware initialized.\n"));
2840 	}
2841 #endif
2842 
2843 	/*
2844 	 * re-enable interrupts
2845 	 */
2846 	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2847 
2848 	return (DDI_INTR_CLAIMED);
2849 }
2850 
/*
 * Map an 802.11 rate (in 500kbps units) to the PLCP signal value the
 * firmware expects in the tx command.  Returns 0 for unknown rates.
 */
static uint8_t
iwp_rate_to_plcp(int rate)
{
	static const struct {
		int	r_rate;
		uint8_t	r_plcp;
	} rate_map[] = {
		/* CCK rates */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },
		/* OFDM rates */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 }
	};
	int i;

	for (i = 0; i < (int)(sizeof (rate_map) / sizeof (rate_map[0])); i++) {
		if (rate_map[i].r_rate == rate) {
			return (rate_map[i].r_plcp);
		}
	}

	return (0);
}
2918 
2919 /*
2920  * invoked by GLD send frames
2921  */
2922 static mblk_t *
2923 iwp_m_tx(void *arg, mblk_t *mp)
2924 {
2925 	iwp_sc_t	*sc;
2926 	ieee80211com_t	*ic;
2927 	mblk_t		*next;
2928 
2929 	if (NULL == arg) {
2930 		return (NULL);
2931 	}
2932 	sc = (iwp_sc_t *)arg;
2933 	ic = &sc->sc_ic;
2934 
2935 	if (sc->sc_flags & IWP_F_SUSPEND) {
2936 		freemsgchain(mp);
2937 		return (NULL);
2938 	}
2939 
2940 	if (ic->ic_state != IEEE80211_S_RUN) {
2941 		freemsgchain(mp);
2942 		return (NULL);
2943 	}
2944 
2945 	if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2946 	    IWP_CHK_FAST_RECOVER(sc)) {
2947 		IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2948 		    "hold queue\n"));
2949 		return (mp);
2950 	}
2951 
2952 
2953 	while (mp != NULL) {
2954 		next = mp->b_next;
2955 		mp->b_next = NULL;
2956 		if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2957 			mp->b_next = next;
2958 			break;
2959 		}
2960 		mp = next;
2961 	}
2962 
2963 	return (mp);
2964 }
2965 
2966 /*
2967  * send frames
2968  */
2969 static int
2970 iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2971 {
2972 	iwp_sc_t *sc;
2973 	iwp_tx_ring_t *ring;
2974 	iwp_tx_desc_t *desc;
2975 	iwp_tx_data_t *data;
2976 	iwp_tx_data_t *desc_data;
2977 	iwp_cmd_t *cmd;
2978 	iwp_tx_cmd_t *tx;
2979 	ieee80211_node_t *in;
2980 	struct ieee80211_frame *wh;
2981 	struct ieee80211_key *k = NULL;
2982 	mblk_t *m, *m0;
2983 	int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
2984 	uint16_t masks = 0;
2985 	uint32_t rate, s_id = 0;
2986 
2987 	if (NULL == ic) {
2988 		return (IWP_FAIL);
2989 	}
2990 	sc = (iwp_sc_t *)ic;
2991 
2992 	if (sc->sc_flags & IWP_F_SUSPEND) {
2993 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2994 		    IEEE80211_FC0_TYPE_DATA) {
2995 			freemsg(mp);
2996 		}
2997 		err = IWP_FAIL;
2998 		goto exit;
2999 	}
3000 
3001 	mutex_enter(&sc->sc_tx_lock);
3002 	ring = &sc->sc_txq[0];
3003 	data = &ring->data[ring->cur];
3004 	cmd = data->cmd;
3005 	bzero(cmd, sizeof (*cmd));
3006 
3007 	ring->cur = (ring->cur + 1) % ring->count;
3008 
3009 	/*
3010 	 * Need reschedule TX if TX buffer is full.
3011 	 */
3012 	if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
3013 		IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3014 		"no txbuf\n"));
3015 
3016 		sc->sc_need_reschedule = 1;
3017 		mutex_exit(&sc->sc_tx_lock);
3018 
3019 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
3020 		    IEEE80211_FC0_TYPE_DATA) {
3021 			freemsg(mp);
3022 		}
3023 		sc->sc_tx_nobuf++;
3024 		err = IWP_FAIL;
3025 		goto exit;
3026 	}
3027 
3028 	ring->queued++;
3029 
3030 	mutex_exit(&sc->sc_tx_lock);
3031 
3032 	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3033 
3034 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
3035 	if (NULL == m) { /* can not alloc buf, drop this package */
3036 		cmn_err(CE_WARN, "iwp_send(): "
3037 		    "failed to allocate msgbuf\n");
3038 		freemsg(mp);
3039 
3040 		mutex_enter(&sc->sc_tx_lock);
3041 		ring->queued--;
3042 		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3043 			sc->sc_need_reschedule = 0;
3044 			mutex_exit(&sc->sc_tx_lock);
3045 			mac_tx_update(ic->ic_mach);
3046 			mutex_enter(&sc->sc_tx_lock);
3047 		}
3048 		mutex_exit(&sc->sc_tx_lock);
3049 
3050 		err = IWP_SUCCESS;
3051 		goto exit;
3052 	}
3053 
3054 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3055 		mblen = MBLKL(m0);
3056 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
3057 		off += mblen;
3058 	}
3059 
3060 	m->b_wptr += off;
3061 
3062 	wh = (struct ieee80211_frame *)m->b_rptr;
3063 
3064 	/*
3065 	 * determine send which AP or station in IBSS
3066 	 */
3067 	in = ieee80211_find_txnode(ic, wh->i_addr1);
3068 	if (NULL == in) {
3069 		cmn_err(CE_WARN, "iwp_send(): "
3070 		    "failed to find tx node\n");
3071 		freemsg(mp);
3072 		freemsg(m);
3073 		sc->sc_tx_err++;
3074 
3075 		mutex_enter(&sc->sc_tx_lock);
3076 		ring->queued--;
3077 		if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3078 			sc->sc_need_reschedule = 0;
3079 			mutex_exit(&sc->sc_tx_lock);
3080 			mac_tx_update(ic->ic_mach);
3081 			mutex_enter(&sc->sc_tx_lock);
3082 		}
3083 		mutex_exit(&sc->sc_tx_lock);
3084 
3085 		err = IWP_SUCCESS;
3086 		goto exit;
3087 	}
3088 
3089 	/*
3090 	 * Net80211 module encapsulate outbound data frames.
3091 	 * Add some feilds of 80211 frame.
3092 	 */
3093 	if ((type & IEEE80211_FC0_TYPE_MASK) ==
3094 	    IEEE80211_FC0_TYPE_DATA) {
3095 		(void) ieee80211_encap(ic, m, in);
3096 	}
3097 
3098 	freemsg(mp);
3099 
3100 	cmd->hdr.type = REPLY_TX;
3101 	cmd->hdr.flags = 0;
3102 	cmd->hdr.qid = ring->qid;
3103 
3104 	tx = (iwp_tx_cmd_t *)cmd->data;
3105 	tx->tx_flags = 0;
3106 
3107 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3108 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3109 	} else {
3110 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3111 	}
3112 
3113 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3114 		k = ieee80211_crypto_encap(ic, m);
3115 		if (NULL == k) {
3116 			freemsg(m);
3117 			sc->sc_tx_err++;
3118 
3119 			mutex_enter(&sc->sc_tx_lock);
3120 			ring->queued--;
3121 			if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3122 				sc->sc_need_reschedule = 0;
3123 				mutex_exit(&sc->sc_tx_lock);
3124 				mac_tx_update(ic->ic_mach);
3125 				mutex_enter(&sc->sc_tx_lock);
3126 			}
3127 			mutex_exit(&sc->sc_tx_lock);
3128 
3129 			err = IWP_SUCCESS;
3130 			goto exit;
3131 		}
3132 
3133 		/* packet header may have moved, reset our local pointer */
3134 		wh = (struct ieee80211_frame *)m->b_rptr;
3135 	}
3136 
3137 	len = msgdsize(m);
3138 
3139 #ifdef DEBUG
3140 	if (iwp_dbg_flags & IWP_DEBUG_TX) {
3141 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3142 	}
3143 #endif
3144 
3145 	tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
3146 	tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;
3147 
3148 	/*
3149 	 * specific TX parameters for management frames
3150 	 */
3151 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3152 	    IEEE80211_FC0_TYPE_MGT) {
3153 		/*
3154 		 * mgmt frames are sent at 1M
3155 		 */
3156 		if ((in->in_rates.ir_rates[0] &
3157 		    IEEE80211_RATE_VAL) != 0) {
3158 			rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3159 		} else {
3160 			rate = 2;
3161 		}
3162 
3163 		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3164 
3165 		/*
3166 		 * tell h/w to set timestamp in probe responses
3167 		 */
3168 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3169 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3170 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3171 
3172 			tx->data_retry_limit = 3;
3173 			if (tx->data_retry_limit < tx->rts_retry_limit) {
3174 				tx->rts_retry_limit = tx->data_retry_limit;
3175 			}
3176 		}
3177 
3178 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3179 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3180 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3181 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3182 			tx->timeout.pm_frame_timeout = LE_16(3);
3183 		} else {
3184 			tx->timeout.pm_frame_timeout = LE_16(2);
3185 		}
3186 
3187 	} else {
3188 		/*
3189 		 * do it here for the software way rate scaling.
3190 		 * later for rate scaling in hardware.
3191 		 *
3192 		 * now the txrate is determined in tx cmd flags, set to the
3193 		 * max value 54M for 11g and 11M for 11b originally.
3194 		 */
3195 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3196 			rate = ic->ic_fixed_rate;
3197 		} else {
3198 			if ((in->in_rates.ir_rates[in->in_txrate] &
3199 			    IEEE80211_RATE_VAL) != 0) {
3200 				rate = in->in_rates.
3201 				    ir_rates[in->in_txrate] &
3202 				    IEEE80211_RATE_VAL;
3203 			}
3204 		}
3205 
3206 		tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3207 
3208 		tx->timeout.pm_frame_timeout = 0;
3209 	}
3210 
3211 	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3212 	    "tx rate[%d of %d] = %x",
3213 	    in->in_txrate, in->in_rates.ir_nrates, rate));
3214 
3215 	len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
3216 	if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
3217 		tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3218 	}
3219 
3220 	/*
3221 	 * retrieve destination node's id
3222 	 */
3223 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3224 		tx->sta_id = IWP_BROADCAST_ID;
3225 	} else {
3226 		tx->sta_id = IWP_AP_ID;
3227 	}
3228 
3229 	if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3230 		masks |= RATE_MCS_CCK_MSK;
3231 	}
3232 
3233 	masks |= RATE_MCS_ANT_B_MSK;
3234 	tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);
3235 
3236 	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3237 	    "tx flag = %x",
3238 	    tx->tx_flags));
3239 
3240 	tx->stop_time.life_time  = LE_32(0xffffffff);
3241 
3242 	tx->len = LE_16(len);
3243 
3244 	tx->dram_lsb_ptr =
3245 	    LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
3246 	tx->dram_msb_ptr = 0;
3247 	tx->driver_txop = 0;
3248 	tx->next_frame_len = 0;
3249 
3250 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
3251 	m->b_rptr += hdrlen;
3252 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
3253 
3254 	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3255 	    "sending data: qid=%d idx=%d len=%d",
3256 	    ring->qid, ring->cur, len));
3257 
3258 	/*
3259 	 * first segment includes the tx cmd plus the 802.11 header,
3260 	 * the second includes the remaining of the 802.11 frame.
3261 	 */
3262 	mutex_enter(&sc->sc_tx_lock);
3263 
3264 	cmd->hdr.idx = ring->desc_cur;
3265 
3266 	desc_data = &ring->data[ring->desc_cur];
3267 	desc = desc_data->desc;
3268 	bzero(desc, sizeof (*desc));
3269 	desc->val0 = 2 << 24;
3270 	desc->pa[0].tb1_addr = data->paddr_cmd;
3271 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3272 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3273 	desc->pa[0].val2 =
3274 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3275 	    ((len - hdrlen) << 20);
3276 	IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3277 	    "phy addr1 = 0x%x phy addr2 = 0x%x "
3278 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3279 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
3280 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3281 
3282 	/*
3283 	 * kick ring
3284 	 */
3285 	s_id = tx->sta_id;
3286 
3287 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3288 	    tfd_offset[ring->desc_cur].val =
3289 	    (8 + len) | (s_id << 12);
3290 	if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
3291 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3292 		    tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
3293 		    (8 + len) | (s_id << 12);
3294 	}
3295 
3296 	IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3297 	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3298 
3299 	ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3300 	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3301 
3302 	mutex_exit(&sc->sc_tx_lock);
3303 	freemsg(m);
3304 
3305 	/*
3306 	 * release node reference
3307 	 */
3308 	ieee80211_free_node(in);
3309 
3310 	ic->ic_stats.is_tx_bytes += len;
3311 	ic->ic_stats.is_tx_frags++;
3312 
3313 	mutex_enter(&sc->sc_mt_lock);
3314 	if (0 == sc->sc_tx_timer) {
3315 		sc->sc_tx_timer = 4;
3316 	}
3317 	mutex_exit(&sc->sc_mt_lock);
3318 
3319 exit:
3320 	return (err);
3321 }
3322 
3323 /*
3324  * invoked by GLD to deal with IOCTL affaires
3325  */
3326 static void
3327 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3328 {
3329 	iwp_sc_t	*sc;
3330 	ieee80211com_t	*ic;
3331 	int		err = EINVAL;
3332 
3333 	if (NULL == arg) {
3334 		return;
3335 	}
3336 	sc = (iwp_sc_t *)arg;
3337 	ic = &sc->sc_ic;
3338 
3339 	err = ieee80211_ioctl(ic, wq, mp);
3340 	if (ENETRESET == err) {
3341 		/*
3342 		 * This is special for the hidden AP connection.
3343 		 * In any case, we should make sure only one 'scan'
3344 		 * in the driver for a 'connect' CLI command. So
3345 		 * when connecting to a hidden AP, the scan is just
3346 		 * sent out to the air when we know the desired
3347 		 * essid of the AP we want to connect.
3348 		 */
3349 		if (ic->ic_des_esslen) {
3350 			if (sc->sc_flags & IWP_F_RUNNING) {
3351 				iwp_m_stop(sc);
3352 				(void) iwp_m_start(sc);
3353 				(void) ieee80211_new_state(ic,
3354 				    IEEE80211_S_SCAN, -1);
3355 			}
3356 		}
3357 	}
3358 }
3359 
3360 /*
3361  * Call back functions for get/set proporty
3362  */
3363 static int
3364 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3365     uint_t wldp_length, void *wldp_buf)
3366 {
3367 	iwp_sc_t	*sc;
3368 	int		err = EINVAL;
3369 
3370 	if (NULL == arg) {
3371 		return (EINVAL);
3372 	}
3373 	sc = (iwp_sc_t *)arg;
3374 
3375 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3376 	    wldp_length, wldp_buf);
3377 
3378 	return (err);
3379 }
3380 
3381 static void
3382 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3383     mac_prop_info_handle_t prh)
3384 {
3385 	iwp_sc_t	*sc;
3386 
3387 	sc = (iwp_sc_t *)arg;
3388 	ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
3389 }
3390 
3391 static int
3392 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3393     uint_t wldp_length, const void *wldp_buf)
3394 {
3395 	iwp_sc_t		*sc;
3396 	ieee80211com_t		*ic;
3397 	int			err = EINVAL;
3398 
3399 	if (NULL == arg) {
3400 		return (EINVAL);
3401 	}
3402 	sc = (iwp_sc_t *)arg;
3403 	ic = &sc->sc_ic;
3404 
3405 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3406 	    wldp_buf);
3407 
3408 	if (err == ENETRESET) {
3409 		if (ic->ic_des_esslen) {
3410 			if (sc->sc_flags & IWP_F_RUNNING) {
3411 				iwp_m_stop(sc);
3412 				(void) iwp_m_start(sc);
3413 				(void) ieee80211_new_state(ic,
3414 				    IEEE80211_S_SCAN, -1);
3415 			}
3416 		}
3417 		err = 0;
3418 	}
3419 	return (err);
3420 }
3421 
3422 /*
3423  * invoked by GLD supply statistics NIC and driver
3424  */
static int
iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iwp_sc_t	*sc;
	ieee80211com_t	*ic;
	ieee80211_node_t *in;

	if (NULL == arg) {
		return (EINVAL);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * sc_glock protects the driver-private counters read below.
	 * Every exit path from the switch must drop it.
	 */
	mutex_enter(&sc->sc_glock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		in = ic->ic_bss;
		/*
		 * net80211 rates are in units of 500Kb/s: divide by 2
		 * to get Mb/s, then scale to bits per second.  Use the
		 * fixed rate when one is configured, otherwise the
		 * current tx rate of the BSS node.
		 */
		*val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
		    IEEE80211_RATE(in->in_txrate) :
		    ic->ic_fixed_rate) / 2 * 1000000;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_tx_nobuf;
		break;
	case MAC_STAT_NORCVBUF:
		*val = sc->sc_rx_nobuf;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_rx_err;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_tx_err;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_tx_retries;
		break;
	case WIFI_STAT_FCS_ERRORS:
	case WIFI_STAT_WEP_ERRORS:
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/*
		 * These are maintained by net80211; drop our lock
		 * before delegating.
		 */
		mutex_exit(&sc->sc_glock);
		return (ieee80211_stat(ic, stat, val));
	default:
		mutex_exit(&sc->sc_glock);
		return (ENOTSUP);
	}

	mutex_exit(&sc->sc_glock);

	return (IWP_SUCCESS);

}
3497 
3498 /*
3499  * invoked by GLD to start or open NIC
3500  */
static int
iwp_m_start(void *arg)
{
	iwp_sc_t *sc;
	ieee80211com_t	*ic;
	int err = IWP_FAIL;

	if (NULL == arg) {
		return (EINVAL);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	err = iwp_init(sc);
	if (err != IWP_SUCCESS) {
		/*
		 * The hw init err(eg. RF is OFF). Return Success to make
		 * the 'plumb' succeed. The iwp_thread() tries to re-init
		 * background.
		 */
		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		return (IWP_SUCCESS);
	}

	/*
	 * Start the 802.11 state machine from INIT and mark the
	 * NIC as running so other entry points may proceed.
	 */
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);

	return (IWP_SUCCESS);
}
3531 
3532 /*
3533  * invoked by GLD to stop or down NIC
3534  */
static void
iwp_m_stop(void *arg)
{
	iwp_sc_t *sc;
	ieee80211com_t	*ic;

	if (NULL == arg) {
		return;
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * Bring the hardware down first, then tear down the
	 * software state that depends on it.
	 */
	iwp_stop(sc);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/* return the 802.11 state machine to INIT */
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	/*
	 * Clear all operational flags; in particular, clearing
	 * IWP_F_HW_ERR_RECOVER keeps iwp_thread() from trying to
	 * re-initialize a deliberately stopped NIC.
	 */
	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
	atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

	atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
	atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
}
3562 
3563 /*
3564  * invoked by GLD to configure NIC
3565  */
3566 static int
3567 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3568 {
3569 	iwp_sc_t *sc;
3570 	ieee80211com_t	*ic;
3571 	int err = IWP_SUCCESS;
3572 
3573 	if (NULL == arg) {
3574 		return (EINVAL);
3575 	}
3576 	sc = (iwp_sc_t *)arg;
3577 	ic = &sc->sc_ic;
3578 
3579 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3580 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3581 		mutex_enter(&sc->sc_glock);
3582 		err = iwp_config(sc);
3583 		mutex_exit(&sc->sc_glock);
3584 		if (err != IWP_SUCCESS) {
3585 			cmn_err(CE_WARN, "iwp_m_unicst(): "
3586 			    "failed to configure device\n");
3587 			goto fail;
3588 		}
3589 	}
3590 
3591 	return (err);
3592 
3593 fail:
3594 	return (err);
3595 }
3596 
3597 /* ARGSUSED */
3598 static int
3599 iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3600 {
3601 	return (IWP_SUCCESS);
3602 }
3603 
3604 /* ARGSUSED */
3605 static int
3606 iwp_m_promisc(void *arg, boolean_t on)
3607 {
3608 	return (IWP_SUCCESS);
3609 }
3610 
3611 /*
3612  * kernel thread to deal with exceptional situation
3613  */
static void
iwp_thread(iwp_sc_t *sc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
	clock_t clk;
	int err, n = 0, timeout = 0;
	uint32_t tmp;
#ifdef	DEBUG
	int times = 0;
#endif

	/*
	 * Main loop: runs until iwp_destroy/detach clears
	 * sc_mf_thread_switch.  Each pass checks the RF-kill switch,
	 * performs fatal-error recovery, paces active scanning,
	 * drives automatic rate control and watches the tx timer.
	 */
	while (sc->sc_mf_thread_switch) {
		/* sample the hardware RF-kill switch state */
		tmp = IWP_READ(sc, CSR_GP_CNTRL);
		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
		} else {
			atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
		}

		/*
		 * If  in SUSPEND or the RF is OFF, do nothing.
		 */
		if (sc->sc_flags & IWP_F_RADIO_OFF) {
			delay(drv_usectohz(100000));
			continue;
		}

		/*
		 * recovery fatal error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {

			IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwp_stop(sc);

			if (IWP_CHK_FAST_RECOVER(sc)) {
				/* save runtime configuration */
				bcopy(&sc->sc_config, &sc->sc_config_save,
				    sizeof (sc->sc_config));
			} else {
				/*
				 * slow path: drop to INIT and back off
				 * progressively (n grows with failures)
				 */
				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
				delay(drv_usectohz(2000000 + n*500000));
			}

			/*
			 * retry initialization up to 20 times before
			 * giving up on this recovery attempt
			 */
			err = iwp_init(sc);
			if (err != IWP_SUCCESS) {
				n++;
				if (n < 20) {
					continue;
				}
			}

			n = 0;
			if (!err) {
				atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
			}


			/*
			 * If fast recovery is not possible (or failed),
			 * clear the recovery flag and rescan unless we
			 * were previously in INIT.
			 */
			if (!IWP_CHK_FAST_RECOVER(sc) ||
			    iwp_fast_recover(sc) != IWP_SUCCESS) {
				atomic_and_32(&sc->sc_flags,
				    ~IWP_F_HW_ERR_RECOVER);

				delay(drv_usectohz(2000000));
				if (sc->sc_ostate != IEEE80211_S_INIT) {
					ieee80211_new_state(ic,
					    IEEE80211_S_SCAN, 0);
				}
			}
		}

		/*
		 * While scanning, pace channel switches: wait 200ms for
		 * probe responses before moving to the next channel.
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
			IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
			    "wait for probe response\n"));

			sc->sc_scan_pending--;
			delay(drv_usectohz(200000));
			ieee80211_next_scan(ic);
		}

		/*
		 * rate ctl
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
			clk = ddi_get_lbolt();
			/* run AMRR roughly once per second */
			if (clk > sc->sc_clk + drv_usectohz(1000000)) {
				iwp_amrr_timeout(sc);
			}
		}

		delay(drv_usectohz(100000));

		/*
		 * tx watchdog: every 10 loop passes (~1s) decrement the
		 * tx timer; when it expires, trigger hardware recovery.
		 */
		mutex_enter(&sc->sc_mt_lock);
		if (sc->sc_tx_timer) {
			timeout++;
			if (10 == timeout) {
				sc->sc_tx_timer--;
				if (0 == sc->sc_tx_timer) {
					atomic_or_32(&sc->sc_flags,
					    IWP_F_HW_ERR_RECOVER);
					sc->sc_ostate = IEEE80211_S_RUN;
					IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
					    "try to recover from "
					    "send fail\n"));
				}
				timeout = 0;
			}
		}
		mutex_exit(&sc->sc_mt_lock);
	}

	/* thread exit: clear the handle and wake any waiter in detach */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}
3735 
3736 
3737 /*
3738  * Send a command to the ucode.
3739  */
static int
iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_cmd_t *cmd;

	/* caller must hold sc_glock; payload must fit the cmd buffer */
	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
	    "code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	/*
	 * fill in the command header and copy the payload into the
	 * DMA-able command buffer for the current ring slot
	 */
	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	/* one TB in this TFD; length includes the 4-byte header */
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	/* track the number of outstanding asynchronous commands */
	if (async) {
		sc->sc_cmd_accum++;
	}

	/*
	 * kick cmd ring XXX
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		/* mirror entry required by the hardware scheduler */
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async) {
		/* fire and forget; completion handled by the ISR path */
		return (IWP_SUCCESS);
	} else {
		clock_t clk;

		/*
		 * synchronous command: wait up to 2 seconds for the
		 * interrupt handler to mark the command done
		 */
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
			if (cv_timedwait(&sc->sc_cmd_cv,
			    &sc->sc_glock, clk) < 0) {
				break;
			}
		}

		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_SUCCESS);
		} else {
			/* timed out; reset the flag for the next caller */
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_FAIL);
		}
	}
}
3805 
3806 /*
3807  * require ucode seting led of NIC
3808  */
3809 static void
3810 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3811 {
3812 	iwp_led_cmd_t led;
3813 
3814 	led.interval = LE_32(100000);	/* unit: 100ms */
3815 	led.id = id;
3816 	led.off = off;
3817 	led.on = on;
3818 
3819 	(void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3820 }
3821 
3822 /*
3823  * necessary setting to NIC before authentication
3824  */
3825 static int
3826 iwp_hw_set_before_auth(iwp_sc_t *sc)
3827 {
3828 	ieee80211com_t *ic = &sc->sc_ic;
3829 	ieee80211_node_t *in = ic->ic_bss;
3830 	int err = IWP_FAIL;
3831 
3832 	/*
3833 	 * update adapter's configuration according
3834 	 * the info of target AP
3835 	 */
3836 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3837 	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3838 
3839 	sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
3840 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
3841 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
3842 
3843 	if (IEEE80211_MODE_11B == ic->ic_curmode) {
3844 		sc->sc_config.cck_basic_rates  = 0x03;
3845 		sc->sc_config.ofdm_basic_rates = 0;
3846 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3847 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3848 		sc->sc_config.cck_basic_rates  = 0;
3849 		sc->sc_config.ofdm_basic_rates = 0x15;
3850 	} else { /* assume 802.11b/g */
3851 		sc->sc_config.cck_basic_rates  = 0x0f;
3852 		sc->sc_config.ofdm_basic_rates = 0xff;
3853 	}
3854 
3855 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3856 	    RXON_FLG_SHORT_SLOT_MSK);
3857 
3858 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
3859 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3860 	} else {
3861 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3862 	}
3863 
3864 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
3865 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3866 	} else {
3867 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3868 	}
3869 
3870 	IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
3871 	    "config chan %d flags %x "
3872 	    "filter_flags %x  cck %x ofdm %x"
3873 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3874 	    LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3875 	    LE_32(sc->sc_config.filter_flags),
3876 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3877 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3878 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3879 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3880 
3881 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
3882 	    sizeof (iwp_rxon_cmd_t), 1);
3883 	if (err != IWP_SUCCESS) {
3884 		cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
3885 		    "failed to config chan%d\n", sc->sc_config.chan);
3886 		return (err);
3887 	}
3888 
3889 	/*
3890 	 * add default AP node
3891 	 */
3892 	err = iwp_add_ap_sta(sc);
3893 	if (err != IWP_SUCCESS) {
3894 		return (err);
3895 	}
3896 
3897 
3898 	return (err);
3899 }
3900 
3901 /*
3902  * Send a scan request(assembly scan cmd) to the firmware.
3903  */
static int
iwp_scan(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_tx_data_t *data;
	iwp_cmd_t *cmd;
	iwp_scan_hdr_t *hdr;
	iwp_scan_chan_t chan;
	struct ieee80211_frame *wh;
	ieee80211_node_t *in = ic->ic_bss;
	uint8_t essid[IEEE80211_NWID_LEN+1];
	struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	uint8_t *frm;
	int i, pktlen, nrates;

	/*
	 * the scan command is built in place in the DMA buffer of the
	 * current command-queue slot
	 */
	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = (iwp_cmd_t *)data->dma_data.mem_va;

	cmd->hdr.type = REPLY_SCAN_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur | 0x40;

	/* scan header: one channel, 2.4GHz band, all rx chains */
	hdr = (iwp_scan_hdr_t *)cmd->data;
	(void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
	hdr->nchan = 1;
	hdr->quiet_time = LE_16(50);
	hdr->quiet_plcp_th = LE_16(1);

	hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
	hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	/* probe request is sent at 1Mb/s CCK on antenna B */
	hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
	hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
	hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
	hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
	hdr->tx_cmd.rate.r.rate_n_flags |=
	    LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
	hdr->direct_scan[0].len = ic->ic_des_esslen;
	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;

	hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
	    RXON_FILTER_BCON_AWARE_MSK);

	/*
	 * directed scan when a desired ESSID is configured,
	 * otherwise a broadcast (wildcard SSID) scan
	 */
	if (ic->ic_des_esslen) {
		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
		essid[ic->ic_des_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "directed scan %s\n", essid));

		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
		    ic->ic_des_esslen);
	} else {
		bzero(hdr->direct_scan[0].ssid,
		    sizeof (hdr->direct_scan[0].ssid));
	}

	/*
	 * a probe request frame is required after the REPLY_SCAN_CMD
	 */
	wh = (struct ieee80211_frame *)(hdr + 1);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	/* broadcast DA and BSSID, our own MAC as SA */
	(void) memset(wh->i_addr1, 0xff, 6);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
	(void) memset(wh->i_addr3, 0xff, 6);
	*(uint16_t *)&wh->i_dur[0] = 0;
	*(uint16_t *)&wh->i_seq[0] = 0;

	frm = (uint8_t *)(wh + 1);

	/*
	 * essid IE
	 */
	if (in->in_esslen) {
		bcopy(in->in_essid, essid, in->in_esslen);
		essid[in->in_esslen] = '\0';
		IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
		    "probe with ESSID %s\n",
		    essid));
	}
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = in->in_esslen;
	(void) memcpy(frm, in->in_essid, in->in_esslen);
	frm += in->in_esslen;

	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
	rs = &ic->ic_sup_rates[mode];

	/*
	 * supported rates IE
	 */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->ir_nrates;
	if (nrates > IEEE80211_RATE_SIZE) {
		nrates = IEEE80211_RATE_SIZE;
	}

	*frm++ = (uint8_t)nrates;
	(void) memcpy(frm, rs->ir_rates, nrates);
	frm += nrates;

	/*
	 * supported xrates IE
	 */
	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/*
	 * optionnal IE (usually for wpa)
	 */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
	hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
	    LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));

	/*
	 * the attribute of the scan channels are required after the probe
	 * request frame.
	 *
	 * NOTE(review): chan is a stack variable that is never zeroed;
	 * only the fields set below are initialized.  Confirm that
	 * iwp_scan_chan_t has no additional members that would be sent
	 * to the firmware uninitialized.
	 */
	for (i = 1; i <= hdr->nchan; i++) {
		/* type 3 = active (directed) scan, 1 = passive */
		if (ic->ic_des_esslen) {
			chan.type = LE_32(3);
		} else {
			chan.type = LE_32(1);
		}

		chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
		chan.tpc.tx_gain = 0x28;
		chan.tpc.dsp_atten = 110;
		chan.active_dwell = LE_16(50);
		chan.passive_dwell = LE_16(120);

		bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
		frm += sizeof (iwp_scan_chan_t);
	}

	pktlen = _PTRDIFF(frm, cmd);

	/* one TB describing the whole scan command */
	(void) memset(desc, 0, sizeof (*desc));
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;

	/*
	 * maybe for cmd, filling the byte cnt table is not necessary.
	 * anyway, we fill it here.
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}

	/*
	 * kick cmd ring
	 */
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return (IWP_SUCCESS);
}
4086 
4087 /*
4088  * configure NIC by using ucode commands after loading ucode.
4089  */
static int
iwp_config(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_powertable_cmd_t powertable;
	iwp_bt_cmd_t bt;
	iwp_add_sta_t node;
	iwp_rem_sta_t	rm_sta;
	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	int err = IWP_FAIL;

	/*
	 * set power mode. Disable power management at present, do it later
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set power mode\n");
		return (err);
	}

	/*
	 * configure bt coexistence
	 */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/*
	 * configure rxon
	 */
	(void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));

	/*
	 * device type and filter flags depend on the 802.11
	 * operating mode selected by net80211
	 */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;

		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}

	/*
	 * Support all CCK rates.
	 */
	sc->sc_config.cck_basic_rates  = 0x0f;

	/*
	 * Support all OFDM rates.
	 */
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * remove all nodes in NIC
	 */
	(void) memset(&rm_sta, 0, sizeof (rm_sta));
	rm_sta.num_sta = 1;
	(void) memcpy(rm_sta.addr, bcast, 6);

	err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to remove broadcast node in hardware.\n");
		return (err);
	}

	/*
	 * add broadcast node so that we can send broadcast frame
	 */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.sta.addr, 0xff, 6);
	node.mode = 0;
	node.sta.sta_id = IWP_BROADCAST_ID;
	node.station_flags = 0;

	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	return (err);
}
4221 
4222 /*
4223  * quiesce(9E) entry point.
4224  * This function is called when the system is single-threaded at high
4225  * PIL with preemption disabled. Therefore, this function must not be
4226  * blocked.
4227  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4228  * DDI_FAILURE indicates an error condition and should almost never happen.
4229  */
4230 static int
4231 iwp_quiesce(dev_info_t *dip)
4232 {
4233 	iwp_sc_t *sc;
4234 
4235 	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4236 	if (NULL == sc) {
4237 		return (DDI_FAILURE);
4238 	}
4239 
4240 #ifdef DEBUG
4241 	/* by pass any messages, if it's quiesce */
4242 	iwp_dbg_flags = 0;
4243 #endif
4244 
4245 	/*
4246 	 * No more blocking is allowed while we are in the
4247 	 * quiesce(9E) entry point.
4248 	 */
4249 	atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4250 
4251 	/*
4252 	 * Disable and mask all interrupts.
4253 	 */
4254 	iwp_stop(sc);
4255 
4256 	return (DDI_SUCCESS);
4257 }
4258 
static void
iwp_stop_master(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	/* request the DMA master to stop */
	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);

	/*
	 * when the MAC is in power-save mode there is no need
	 * to wait for the master-disabled acknowledgement
	 */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
		return;
	}

	/* poll up to 2 seconds (2000 x 1ms) for the master to stop */
	for (n = 0; n < 2000; n++) {
		if (IWP_READ(sc, CSR_RESET) &
		    CSR_RESET_REG_FLAG_MASTER_DISABLED) {
			break;
		}
		DELAY(1000);
	}

#ifdef	DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
		    "timeout waiting for master stop\n"));
	}
#endif
}
4289 
4290 static int
4291 iwp_power_up(iwp_sc_t *sc)
4292 {
4293 	uint32_t tmp;
4294 
4295 	iwp_mac_access_enter(sc);
4296 	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4297 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4298 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4299 	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4300 	iwp_mac_access_exit(sc);
4301 
4302 	DELAY(5000);
4303 	return (IWP_SUCCESS);
4304 }
4305 
4306 /*
4307  * hardware initialization
4308  */
static int
iwp_preinit(iwp_sc_t *sc)
{
	int		n;
	uint8_t		vlink;
	uint16_t	radio_cfg;
	uint32_t	tmp;

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/* disable the L0s exit timer (chip erratum workaround) */
	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * wait for clock ready
	 */
	for (n = 0; n < 1000; n++) {
		if (IWP_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
			break;
		}
		DELAY(10);
	}

	if (1000 == n) {
		return (ETIMEDOUT);
	}

	/* enable the DMA clock and disable L1-active */
	iwp_mac_access_enter(sc);

	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);

	DELAY(20);
	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwp_mac_access_exit(sc);

	/*
	 * program the radio type/step/dash from the EEPROM into
	 * the hardware interface configuration register
	 */
	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
		    SP_RADIO_STEP_MSK(radio_cfg) |
		    SP_RADIO_DASH_MSK(radio_cfg));
	} else {
		cmn_err(CE_WARN, "iwp_preinit(): "
		    "radio configuration information in eeprom is wrong\n");
		return (IWP_FAIL);
	}


	/* set interrupt coalescing timer */
	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);

	(void) iwp_power_up(sc);

	/*
	 * PCI configuration space fixup for early hardware
	 * revisions; clears bit 11 at config offset 0xe8
	 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}

	/* clear bit 1 at PCI config offset 0xf0 */
	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);

	/*
	 * make sure power supply on each part of the hardware
	 */
	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);

	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	/* advertise the power-amplifier SKU to the firmware */
	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
	}

	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {

		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
	}

	return (IWP_SUCCESS);
}
4417 
4418 /*
4419  * set up semphore flag to own EEPROM
4420  */
4421 static int
4422 iwp_eep_sem_down(iwp_sc_t *sc)
4423 {
4424 	int count1, count2;
4425 	uint32_t tmp;
4426 
4427 	for (count1 = 0; count1 < 1000; count1++) {
4428 		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4429 		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4430 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4431 
4432 		for (count2 = 0; count2 < 2; count2++) {
4433 			if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4434 			    CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4435 				return (IWP_SUCCESS);
4436 			}
4437 			DELAY(10000);
4438 		}
4439 	}
4440 	return (IWP_FAIL);
4441 }
4442 
4443 /*
4444  * reset semphore flag to release EEPROM
4445  */
4446 static void
4447 iwp_eep_sem_up(iwp_sc_t *sc)
4448 {
4449 	uint32_t tmp;
4450 
4451 	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4452 	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4453 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4454 }
4455 
4456 /*
4457  * This function read all infomation from eeprom
4458  */
static int
iwp_eep_load(iwp_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/*
	 * read eeprom gp register in CSR
	 */
	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "not find eeprom\n"));
		return (IWP_FAIL);
	}

	/* take ownership of the EEPROM before accessing it */
	rr = iwp_eep_sem_down(sc);
	if (rr != 0) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "driver failed to own EEPROM\n"));
		return (IWP_FAIL);
	}

	/*
	 * read the EEPROM one 16-bit word at a time: write the byte
	 * address, then poll (up to 10 x 10us) for the read-done bit
	 */
	for (addr = 0; addr < eep_sz; addr += 2) {
		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWP_READ(sc, CSR_EEPROM_REG);
		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		for (i = 0; i < 10; i++) {
			rv = IWP_READ(sc, CSR_EEPROM_REG);
			if (rv & 1) {
				break;
			}
			DELAY(10);
		}

		if (!(rv & 1)) {
			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
			    "time out when read eeprome\n"));
			iwp_eep_sem_up(sc);
			return (IWP_FAIL);
		}

		/* the data word lives in the upper 16 bits */
		eep_p[addr/2] = LE_16(rv >> 16);
	}

	/* release the EEPROM semaphore */
	iwp_eep_sem_up(sc);
	return (IWP_SUCCESS);
}
4511 
4512 /*
4513  * initialize mac address in ieee80211com_t struct
4514  */
4515 static void
4516 iwp_get_mac_from_eep(iwp_sc_t *sc)
4517 {
4518 	ieee80211com_t *ic = &sc->sc_ic;
4519 
4520 	IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4521 
4522 	IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4523 	    "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4524 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4525 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4526 }
4527 
4528 /*
4529  * main initialization function
4530  */
static int
iwp_init(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/* first-stage hardware initialization */
	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* load firmware init segment into NIC */
	err = iwp_load_init_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup init firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/*
	 * wait up to one second for the init-firmware "alive"
	 * notification; IWP_F_FW_INIT is set by the ISR path
	 */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process init alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/*
	 * stop chipset for initializing chipset again
	 */
	iwp_stop(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/* second-stage hardware initialization */
	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * load firmware run segment into NIC
	 */
	err = iwp_load_run_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup run firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to one second for the runtime "alive" notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process runtime alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	DELAY(1000);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwp_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to configure device\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * at this point, hardware may receive beacons :)
	 */
	mutex_exit(&sc->sc_glock);
	return (IWP_SUCCESS);
}
4659 
4660 /*
4661  * stop or disable NIC
4662  */
4663 static void
4664 iwp_stop(iwp_sc_t *sc)
4665 {
4666 	uint32_t tmp;
4667 	int i;
4668 
4669 	/* by pass if it's quiesced */
4670 	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
4671 		mutex_enter(&sc->sc_glock);
4672 	}
4673 
4674 	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4675 	/*
4676 	 * disable interrupts
4677 	 */
4678 	IWP_WRITE(sc, CSR_INT_MASK, 0);
4679 	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4680 	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4681 
4682 	/*
4683 	 * reset all Tx rings
4684 	 */
4685 	for (i = 0; i < IWP_NUM_QUEUES; i++) {
4686 		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
4687 	}
4688 
4689 	/*
4690 	 * reset Rx ring
4691 	 */
4692 	iwp_reset_rx_ring(sc);
4693 
4694 	iwp_mac_access_enter(sc);
4695 	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4696 	iwp_mac_access_exit(sc);
4697 
4698 	DELAY(5);
4699 
4700 	iwp_stop_master(sc);
4701 
4702 	mutex_enter(&sc->sc_mt_lock);
4703 	sc->sc_tx_timer = 0;
4704 	mutex_exit(&sc->sc_mt_lock);
4705 
4706 	tmp = IWP_READ(sc, CSR_RESET);
4707 	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4708 
4709 	/* by pass if it's quiesced */
4710 	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
4711 		mutex_exit(&sc->sc_glock);
4712 	}
4713 }
4714 
4715 /*
4716  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4717  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4718  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4719  * INRIA Sophia - Projet Planete
4720  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4721  */
4722 #define	is_success(amrr)	\
4723 	((amrr)->retrycnt < (amrr)->txcnt / 10)
4724 #define	is_failure(amrr)	\
4725 	((amrr)->retrycnt > (amrr)->txcnt / 3)
4726 #define	is_enough(amrr)		\
4727 	((amrr)->txcnt > 200)
4728 #define	not_very_few(amrr)	\
4729 	((amrr)->txcnt > 40)
4730 #define	is_min_rate(in)		\
4731 	(0 == (in)->in_txrate)
4732 #define	is_max_rate(in)		\
4733 	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
4734 #define	increase_rate(in)	\
4735 	((in)->in_txrate++)
4736 #define	decrease_rate(in)	\
4737 	((in)->in_txrate--)
4738 #define	reset_cnt(amrr)		\
4739 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
4740 
4741 #define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	 1
4742 #define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15
4743 
4744 static void
4745 iwp_amrr_init(iwp_amrr_t *amrr)
4746 {
4747 	amrr->success = 0;
4748 	amrr->recovery = 0;
4749 	amrr->txcnt = amrr->retrycnt = 0;
4750 	amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4751 }
4752 
4753 static void
4754 iwp_amrr_timeout(iwp_sc_t *sc)
4755 {
4756 	ieee80211com_t *ic = &sc->sc_ic;
4757 
4758 	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
4759 	    "enter\n"));
4760 
4761 	if (IEEE80211_M_STA == ic->ic_opmode) {
4762 		iwp_amrr_ratectl(NULL, ic->ic_bss);
4763 	} else {
4764 		ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
4765 	}
4766 
4767 	sc->sc_clk = ddi_get_lbolt();
4768 }
4769 
/* ARGSUSED */
/*
 * Per-node AMRR rate-control step, invoked from iwp_amrr_timeout();
 * 'arg' is unused.
 *
 * NOTE(review): the iwp_amrr_t state appears to overlay the node
 * structure (the node pointer is cast directly) -- confirm against
 * iwp_node_alloc().
 */
static void
iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
{
	iwp_amrr_t *amrr = (iwp_amrr_t *)in;
	int need_change = 0;

	/*
	 * sustained success: after success_threshold consecutive good
	 * windows, move up one rate and enter recovery mode.
	 */
	if (is_success(amrr) && is_enough(amrr)) {
		amrr->success++;
		if (amrr->success >= amrr->success_threshold &&
		    !is_max_rate(in)) {
			amrr->recovery = 1;
			amrr->success = 0;
			increase_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR increasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		} else {
			amrr->recovery = 0;
		}
	} else if (not_very_few(amrr) && is_failure(amrr)) {
		/*
		 * too many retries: move down one rate.  If the failure
		 * came right after a recovery upgrade, raise the success
		 * threshold so the next upgrade attempt is more
		 * conservative; otherwise reset it to the minimum.
		 */
		amrr->success = 0;
		if (!is_min_rate(in)) {
			if (amrr->recovery) {
				amrr->success_threshold++;
				if (amrr->success_threshold >
				    IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
					amrr->success_threshold =
					    IWP_AMRR_MAX_SUCCESS_THRESHOLD;
				}
			} else {
				amrr->success_threshold =
				    IWP_AMRR_MIN_SUCCESS_THRESHOLD;
			}
			decrease_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR decreasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		}
		amrr->recovery = 0;	/* paper is incorrect */
	}

	/*
	 * start a fresh sampling window once a decision was possible.
	 */
	if (is_enough(amrr) || need_change) {
		reset_cnt(amrr);
	}
}
4822 
4823 /*
4824  * translate indirect address in eeprom to direct address
4825  * in eeprom and return address of entry whos indirect address
4826  * is indi_addr
4827  */
4828 static uint8_t *
4829 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
4830 {
4831 	uint32_t	di_addr;
4832 	uint16_t	temp;
4833 
4834 	if (!(indi_addr & INDIRECT_ADDRESS)) {
4835 		di_addr = indi_addr;
4836 		return (&sc->sc_eep_map[di_addr]);
4837 	}
4838 
4839 	switch (indi_addr & INDIRECT_TYPE_MSK) {
4840 	case INDIRECT_GENERAL:
4841 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
4842 		break;
4843 	case	INDIRECT_HOST:
4844 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
4845 		break;
4846 	case	INDIRECT_REGULATORY:
4847 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
4848 		break;
4849 	case	INDIRECT_CALIBRATION:
4850 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
4851 		break;
4852 	case	INDIRECT_PROCESS_ADJST:
4853 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
4854 		break;
4855 	case	INDIRECT_OTHERS:
4856 		temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
4857 		break;
4858 	default:
4859 		temp = 0;
4860 		cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
4861 		    "incorrect indirect eeprom address.\n");
4862 		break;
4863 	}
4864 
4865 	di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
4866 
4867 	return (&sc->sc_eep_map[di_addr]);
4868 }
4869 
4870 /*
4871  * loade a section of ucode into NIC
4872  */
4873 static int
4874 iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
4875 {
4876 
4877 	iwp_mac_access_enter(sc);
4878 
4879 	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
4880 	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
4881 
4882 	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);
4883 
4884 	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
4885 	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));
4886 
4887 	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);
4888 
4889 	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
4890 	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
4891 	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
4892 	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
4893 
4894 	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
4895 	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4896 	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
4897 	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
4898 
4899 	iwp_mac_access_exit(sc);
4900 
4901 	return (IWP_SUCCESS);
4902 }
4903 
4904 /*
4905  * necessary setting during alive notification
4906  */
4907 static int
4908 iwp_alive_common(iwp_sc_t *sc)
4909 {
4910 	uint32_t	base;
4911 	uint32_t	i;
4912 	iwp_wimax_coex_cmd_t	w_cmd;
4913 	iwp_calibration_crystal_cmd_t	c_cmd;
4914 	uint32_t	rv = IWP_FAIL;
4915 
4916 	/*
4917 	 * initialize SCD related registers to make TX work.
4918 	 */
4919 	iwp_mac_access_enter(sc);
4920 
4921 	/*
4922 	 * read sram address of data base.
4923 	 */
4924 	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);
4925 
4926 	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
4927 	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
4928 	    base += 4) {
4929 		iwp_mem_write(sc, base, 0);
4930 	}
4931 
4932 	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
4933 	    base += 4) {
4934 		iwp_mem_write(sc, base, 0);
4935 	}
4936 
4937 	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
4938 		iwp_mem_write(sc, base + i, 0);
4939 	}
4940 
4941 	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
4942 	    sc->sc_dma_sh.cookie.dmac_address >> 10);
4943 
4944 	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
4945 	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));
4946 
4947 	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);
4948 
4949 	for (i = 0; i < IWP_NUM_QUEUES; i++) {
4950 		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
4951 		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
4952 		iwp_mem_write(sc, sc->sc_scd_base +
4953 		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
4954 		iwp_mem_write(sc, sc->sc_scd_base +
4955 		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
4956 		    sizeof (uint32_t),
4957 		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
4958 		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
4959 		    ((SCD_FRAME_LIMIT <<
4960 		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
4961 		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
4962 	}
4963 
4964 	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);
4965 
4966 	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
4967 	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
4968 
4969 	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
4970 	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);
4971 
4972 	/*
4973 	 * queue 0-7 map to FIFO 0-7 and
4974 	 * all queues work under FIFO mode(none-scheduler_ack)
4975 	 */
4976 	for (i = 0; i < 4; i++) {
4977 		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
4978 		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
4979 		    ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
4980 		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
4981 		    IWP_SCD_QUEUE_STTS_REG_MSK);
4982 	}
4983 
4984 	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
4985 	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
4986 	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
4987 	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
4988 	    IWP_SCD_QUEUE_STTS_REG_MSK);
4989 
4990 	for (i = 5; i < 7; i++) {
4991 		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
4992 		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
4993 		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
4994 		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
4995 		    IWP_SCD_QUEUE_STTS_REG_MSK);
4996 	}
4997 
4998 	iwp_mac_access_exit(sc);
4999 
5000 	(void) memset(&w_cmd, 0, sizeof (w_cmd));
5001 
5002 	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
5003 	if (rv != IWP_SUCCESS) {
5004 		cmn_err(CE_WARN, "iwp_alive_common(): "
5005 		    "failed to send wimax coexist command.\n");
5006 		return (rv);
5007 	}
5008 
5009 	(void) memset(&c_cmd, 0, sizeof (c_cmd));
5010 
5011 	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
5012 	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
5013 	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);
5014 
5015 	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
5016 	if (rv != IWP_SUCCESS) {
5017 		cmn_err(CE_WARN, "iwp_alive_common(): "
5018 		    "failed to send crystal frq calibration command.\n");
5019 		return (rv);
5020 	}
5021 
5022 	/*
5023 	 * make sure crystal frequency calibration ready
5024 	 * before next operations.
5025 	 */
5026 	DELAY(1000);
5027 
5028 	return (IWP_SUCCESS);
5029 }
5030 
5031 /*
5032  * save results of calibration from ucode
5033  */
5034 static void
5035 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5036 {
5037 	struct iwp_calib_results *res_p = &sc->sc_calib_results;
5038 	struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5039 	int len = LE_32(desc->len);
5040 
5041 	/*
5042 	 * ensure the size of buffer is not too big
5043 	 */
5044 	len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5045 
5046 	switch (calib_hdr->op_code) {
5047 	case PHY_CALIBRATE_LO_CMD:
5048 		if (NULL == res_p->lo_res) {
5049 			res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5050 		}
5051 
5052 		if (NULL == res_p->lo_res) {
5053 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5054 			    "failed to allocate memory.\n");
5055 			return;
5056 		}
5057 
5058 		res_p->lo_res_len = len;
5059 		(void) memcpy(res_p->lo_res, calib_hdr, len);
5060 		break;
5061 	case PHY_CALIBRATE_TX_IQ_CMD:
5062 		if (NULL == res_p->tx_iq_res) {
5063 			res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5064 		}
5065 
5066 		if (NULL == res_p->tx_iq_res) {
5067 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5068 			    "failed to allocate memory.\n");
5069 			return;
5070 		}
5071 
5072 		res_p->tx_iq_res_len = len;
5073 		(void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5074 		break;
5075 	case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5076 		if (NULL == res_p->tx_iq_perd_res) {
5077 			res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5078 		}
5079 
5080 		if (NULL == res_p->tx_iq_perd_res) {
5081 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5082 			    "failed to allocate memory.\n");
5083 		}
5084 
5085 		res_p->tx_iq_perd_res_len = len;
5086 		(void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5087 		break;
5088 	case PHY_CALIBRATE_BASE_BAND_CMD:
5089 		if (NULL == res_p->base_band_res) {
5090 			res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5091 		}
5092 
5093 		if (NULL == res_p->base_band_res) {
5094 			cmn_err(CE_WARN, "iwp_save_calib_result(): "
5095 			    "failed to allocate memory.\n");
5096 		}
5097 
5098 		res_p->base_band_res_len = len;
5099 		(void) memcpy(res_p->base_band_res, calib_hdr, len);
5100 		break;
5101 	default:
5102 		cmn_err(CE_WARN, "iwp_save_calib_result(): "
5103 		    "incorrect calibration type(%d).\n", calib_hdr->op_code);
5104 		break;
5105 	}
5106 
5107 }
5108 
5109 static void
5110 iwp_release_calib_buffer(iwp_sc_t *sc)
5111 {
5112 	if (sc->sc_calib_results.lo_res != NULL) {
5113 		kmem_free(sc->sc_calib_results.lo_res,
5114 		    sc->sc_calib_results.lo_res_len);
5115 		sc->sc_calib_results.lo_res = NULL;
5116 	}
5117 
5118 	if (sc->sc_calib_results.tx_iq_res != NULL) {
5119 		kmem_free(sc->sc_calib_results.tx_iq_res,
5120 		    sc->sc_calib_results.tx_iq_res_len);
5121 		sc->sc_calib_results.tx_iq_res = NULL;
5122 	}
5123 
5124 	if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5125 		kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5126 		    sc->sc_calib_results.tx_iq_perd_res_len);
5127 		sc->sc_calib_results.tx_iq_perd_res = NULL;
5128 	}
5129 
5130 	if (sc->sc_calib_results.base_band_res != NULL) {
5131 		kmem_free(sc->sc_calib_results.base_band_res,
5132 		    sc->sc_calib_results.base_band_res_len);
5133 		sc->sc_calib_results.base_band_res = NULL;
5134 	}
5135 
5136 }
5137 
5138 /*
5139  * common section of intialization
5140  */
5141 static int
5142 iwp_init_common(iwp_sc_t *sc)
5143 {
5144 	int32_t	qid;
5145 	uint32_t tmp;
5146 
5147 	(void) iwp_preinit(sc);
5148 
5149 	tmp = IWP_READ(sc, CSR_GP_CNTRL);
5150 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
5151 		cmn_err(CE_NOTE, "iwp_init_common(): "
5152 		    "radio transmitter is off\n");
5153 		return (IWP_FAIL);
5154 	}
5155 
5156 	/*
5157 	 * init Rx ring
5158 	 */
5159 	iwp_mac_access_enter(sc);
5160 	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
5161 
5162 	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
5163 	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
5164 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
5165 
5166 	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
5167 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
5168 	    offsetof(struct iwp_shared, val0)) >> 4));
5169 
5170 	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
5171 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
5172 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
5173 	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
5174 	    (RX_QUEUE_SIZE_LOG <<
5175 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
5176 	iwp_mac_access_exit(sc);
5177 	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
5178 	    (RX_QUEUE_SIZE - 1) & ~0x7);
5179 
5180 	/*
5181 	 * init Tx rings
5182 	 */
5183 	iwp_mac_access_enter(sc);
5184 	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);
5185 
5186 	/*
5187 	 * keep warm page
5188 	 */
5189 	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
5190 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
5191 
5192 	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
5193 		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
5194 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
5195 		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
5196 		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5197 		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
5198 	}
5199 
5200 	iwp_mac_access_exit(sc);
5201 
5202 	/*
5203 	 * clear "radio off" and "disable command" bits
5204 	 */
5205 	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5206 	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
5207 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5208 
5209 	/*
5210 	 * clear any pending interrupts
5211 	 */
5212 	IWP_WRITE(sc, CSR_INT, 0xffffffff);
5213 
5214 	/*
5215 	 * enable interrupts
5216 	 */
5217 	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
5218 
5219 	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5220 	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5221 
5222 	return (IWP_SUCCESS);
5223 }
5224 
/*
 * Re-establish the current association after a hardware error, using
 * the configuration snapshot in sc_config_save.  Clears
 * IWP_F_HW_ERR_RECOVER and restarts the mac tx queue on success.
 * Returns IWP_SUCCESS or the error from the failing step.
 */
static int
iwp_fast_recover(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	int err = IWP_FAIL;

	mutex_enter(&sc->sc_glock);

	/* restore runtime configuration */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/*
	 * present an unassociated RXON first, before re-running the
	 * pre-authentication setup.
	 */
	sc->sc_config.assoc_id = 0;
	sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

	if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "could not setup authentication\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}

	/*
	 * restore the snapshot again -- presumably
	 * iwp_hw_set_before_auth() modified sc_config; confirm.
	 */
	bcopy(&sc->sc_config_save, &sc->sc_config,
	    sizeof (sc->sc_config));

	/* update adapter's configuration */
	err = iwp_run_state_config(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_fast_recover(): "
		    "failed to setup association\n");
		mutex_exit(&sc->sc_glock);
		return (err);
	}
	/* set LED on */
	iwp_set_led(sc, 2, 0, 1);

	mutex_exit(&sc->sc_glock);

	atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);

	/* start queue */
	IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
	    "resume xmit\n"));
	mac_tx_update(ic->ic_mach);

	return (IWP_SUCCESS);
}
5272 
5273 static int
5274 iwp_run_state_config(iwp_sc_t *sc)
5275 {
5276 	struct ieee80211com *ic = &sc->sc_ic;
5277 	ieee80211_node_t *in = ic->ic_bss;
5278 	int err = IWP_FAIL;
5279 
5280 	/*
5281 	 * update adapter's configuration
5282 	 */
5283 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5284 
5285 	/*
5286 	 * short preamble/slot time are
5287 	 * negotiated when associating
5288 	 */
5289 	sc->sc_config.flags &=
5290 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5291 	    RXON_FLG_SHORT_SLOT_MSK);
5292 
5293 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5294 		sc->sc_config.flags |=
5295 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
5296 	}
5297 
5298 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5299 		sc->sc_config.flags |=
5300 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5301 	}
5302 
5303 	sc->sc_config.filter_flags |=
5304 	    LE_32(RXON_FILTER_ASSOC_MSK);
5305 
5306 	if (ic->ic_opmode != IEEE80211_M_STA) {
5307 		sc->sc_config.filter_flags |=
5308 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
5309 	}
5310 
5311 	IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
5312 	    "config chan %d flags %x"
5313 	    " filter_flags %x\n",
5314 	    sc->sc_config.chan, sc->sc_config.flags,
5315 	    sc->sc_config.filter_flags));
5316 
5317 	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
5318 	    sizeof (iwp_rxon_cmd_t), 1);
5319 	if (err != IWP_SUCCESS) {
5320 		cmn_err(CE_WARN, "iwp_run_state_config(): "
5321 		    "could not update configuration\n");
5322 		return (err);
5323 	}
5324 
5325 	return (err);
5326 }
5327 
5328 /*
5329  * This function overwrites default configurations of
5330  * ieee80211com structure in Net80211 module.
5331  */
5332 static void
5333 iwp_overwrite_ic_default(iwp_sc_t *sc)
5334 {
5335 	ieee80211com_t *ic = &sc->sc_ic;
5336 
5337 	sc->sc_newstate = ic->ic_newstate;
5338 	ic->ic_newstate = iwp_newstate;
5339 	ic->ic_node_alloc = iwp_node_alloc;
5340 	ic->ic_node_free = iwp_node_free;
5341 }
5342 
5343 
5344 /*
5345  * This function adds AP station into hardware.
5346  */
5347 static int
5348 iwp_add_ap_sta(iwp_sc_t *sc)
5349 {
5350 	ieee80211com_t *ic = &sc->sc_ic;
5351 	ieee80211_node_t *in = ic->ic_bss;
5352 	iwp_add_sta_t node;
5353 	int err = IWP_FAIL;
5354 
5355 	/*
5356 	 * Add AP node into hardware.
5357 	 */
5358 	(void) memset(&node, 0, sizeof (node));
5359 	IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5360 	node.mode = STA_MODE_ADD_MSK;
5361 	node.sta.sta_id = IWP_AP_ID;
5362 
5363 	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5364 	if (err != IWP_SUCCESS) {
5365 		cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5366 		    "failed to add AP node\n");
5367 		return (err);
5368 	}
5369 
5370 	return (err);
5371 }
5372 
5373 /*
5374  * Check EEPROM version and Calibration version.
5375  */
5376 static int
5377 iwp_eep_ver_chk(iwp_sc_t *sc)
5378 {
5379 	if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5380 	    (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5381 		cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5382 		    "unsupported eeprom detected\n");
5383 		return (IWP_FAIL);
5384 	}
5385 
5386 	return (IWP_SUCCESS);
5387 }
5388 
5389 /*
5390  * Determine parameters for all supported chips.
5391  */
5392 static void
5393 iwp_set_chip_param(iwp_sc_t *sc)
5394 {
5395 	if ((0x008d == sc->sc_dev_id) ||
5396 	    (0x008e == sc->sc_dev_id)) {
5397 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5398 		    PHY_MODE_A | PHY_MODE_N;
5399 
5400 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5401 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5402 
5403 		sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5404 	}
5405 
5406 	if ((0x422c == sc->sc_dev_id) ||
5407 	    (0x4239 == sc->sc_dev_id)) {
5408 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5409 		    PHY_MODE_A | PHY_MODE_N;
5410 
5411 		sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5412 		sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5413 
5414 		sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5415 	}
5416 
5417 	if ((0x422b == sc->sc_dev_id) ||
5418 	    (0x4238 == sc->sc_dev_id)) {
5419 		sc->sc_chip_param.phy_mode = PHY_MODE_G |
5420 		    PHY_MODE_A | PHY_MODE_N;
5421 
5422 		sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5423 		sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5424 
5425 		sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5426 	}
5427 }
5428