xref: /illumos-gate/usr/src/uts/common/io/iwk/iwk2.c (revision 922d2c76afbee21520ffa2088c4e60dcb80d3945)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 /*
32  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/byteorder.h>
37 #include <sys/conf.h>
38 #include <sys/cmn_err.h>
39 #include <sys/stat.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/strsubr.h>
43 #include <sys/ethernet.h>
44 #include <inet/common.h>
45 #include <inet/nd.h>
46 #include <inet/mi.h>
47 #include <sys/note.h>
48 #include <sys/stream.h>
49 #include <sys/strsun.h>
50 #include <sys/modctl.h>
51 #include <sys/devops.h>
52 #include <sys/dlpi.h>
53 #include <sys/mac.h>
54 #include <sys/mac_wifi.h>
55 #include <sys/net80211.h>
56 #include <sys/net80211_proto.h>
57 #include <sys/varargs.h>
58 #include <sys/policy.h>
59 #include <sys/pci.h>
60 
61 #include "iwk_hw.h"
62 #include "iwk_eeprom.h"
63 #include "iwk2_var.h"
64 #include <inet/wifi_ioctl.h>
65 
66 #ifdef DEBUG
67 #define	IWK_DEBUG_80211		(1 << 0)
68 #define	IWK_DEBUG_CMD		(1 << 1)
69 #define	IWK_DEBUG_DMA		(1 << 2)
70 #define	IWK_DEBUG_EEPROM	(1 << 3)
71 #define	IWK_DEBUG_FW		(1 << 4)
72 #define	IWK_DEBUG_HW		(1 << 5)
73 #define	IWK_DEBUG_INTR		(1 << 6)
74 #define	IWK_DEBUG_MRR		(1 << 7)
75 #define	IWK_DEBUG_PIO		(1 << 8)
76 #define	IWK_DEBUG_RX		(1 << 9)
77 #define	IWK_DEBUG_SCAN		(1 << 10)
78 #define	IWK_DEBUG_TX		(1 << 11)
79 #define	IWK_DEBUG_RATECTL	(1 << 12)
80 #define	IWK_DEBUG_RADIO		(1 << 13)
81 #define	IWK_DEBUG_RESUME	(1 << 14)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
88 
/* opaque per-instance soft state anchor, managed via ddi_soft_state_*() */
static void	*iwk_soft_state_p = NULL;
/* 4965 microcode image, compiled into the driver from the ucode hex dump */
static uint8_t iwk_fw_bin [] = {
#include "fw-iw/iw4965.ucode.hex"
};
93 
/* DMA attributes for the shared page (hardware requires 4K alignment) */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes (4K page) */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (must be contiguous) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for the keep warm DRAM descriptor (4K aligned) */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes (4K page) */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (must be contiguous) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a ring descriptor (256-byte aligned) */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (must be contiguous) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a command buffer (4-byte aligned) */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (must be contiguous) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a rx buffer (256-byte aligned) */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (must be contiguous) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (single frame per desc) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for text and data part in the firmware (16-byte aligned) */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address (32-bit DMA only) */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments (must be contiguous) */
	1,		/* granularity */
	0,		/* flags (reserved) */
};


/* access attributes for device registers (device is little-endian) */
static ddi_device_acc_attr_t iwk_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/* access attributes for DMA memory (no byte swapping) */
static ddi_device_acc_attr_t iwk_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t	iwk_intr(caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t	iwk_rx_softintr(caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 
285 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
286 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
287 
288 /*
289  * GLD specific operations
290  */
291 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
292 static int	iwk_m_start(void *arg);
293 static void	iwk_m_stop(void *arg);
294 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
295 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
296 static int	iwk_m_promisc(void *arg, boolean_t on);
297 static mblk_t  *iwk_m_tx(void *arg, mblk_t *mp);
298 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
299 
300 static void	iwk_destroy_locks(iwk_sc_t *sc);
301 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
302 static void	iwk_thread(iwk_sc_t *sc);
303 
/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 * The leading number is the count of rates in the set.
 * 11a and 11n support will be added later.
 */
static const struct ieee80211_rateset iwk_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwk_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };

/*
 * For mfthread only (priority used when creating the monitor thread)
 */
extern pri_t minclsyspri;

/* driver name, used for soft state, minor nodes and MAC registration */
#define	DRV_NAME_4965	"iwk"
320 
/*
 * Module Loading Data & Entry Points
 */
/* dev_ops template; see DDI_DEFINE_STREAM_OPS(9F) for argument order */
DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
    iwk_detach, nodev, NULL, D_MP, NULL);

static struct modldrv iwk_modldrv = {
	&mod_driverops,			/* this is a device driver */
	"Intel(R) 4965AGN driver(N)",	/* module description string */
	&iwk_devops			/* driver ops */
};

static struct modlinkage iwk_modlinkage = {
	MODREV_1,
	&iwk_modldrv,
	NULL
};
338 
339 int
340 _init(void)
341 {
342 	int	status;
343 
344 	status = ddi_soft_state_init(&iwk_soft_state_p,
345 	    sizeof (iwk_sc_t), 1);
346 	if (status != DDI_SUCCESS)
347 		return (status);
348 
349 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
350 	status = mod_install(&iwk_modlinkage);
351 	if (status != DDI_SUCCESS) {
352 		mac_fini_ops(&iwk_devops);
353 		ddi_soft_state_fini(&iwk_soft_state_p);
354 	}
355 
356 	return (status);
357 }
358 
359 int
360 _fini(void)
361 {
362 	int status;
363 
364 	status = mod_remove(&iwk_modlinkage);
365 	if (status == DDI_SUCCESS) {
366 		mac_fini_ops(&iwk_devops);
367 		ddi_soft_state_fini(&iwk_soft_state_p);
368 	}
369 
370 	return (status);
371 }
372 
373 int
374 _info(struct modinfo *mip)
375 {
376 	return (mod_info(&iwk_modlinkage, mip));
377 }
378 
/*
 * Mac Call Back entries (GLDv3). The MC_IOCTL flag announces that only
 * the optional ioctl entry point is provided; the NULL slot is the
 * unused optional resources entry.
 */
mac_callbacks_t	iwk_m_callbacks = {
	MC_IOCTL,
	iwk_m_stat,
	iwk_m_start,
	iwk_m_stop,
	iwk_m_promisc,
	iwk_m_multicst,
	iwk_m_unicst,
	iwk_m_tx,
	NULL,
	iwk_m_ioctl
};
394 
395 #ifdef DEBUG
396 void
397 iwk_dbg(uint32_t flags, const char *fmt, ...)
398 {
399 	va_list	ap;
400 
401 	if (flags & iwk_dbg_flags) {
402 		va_start(ap, fmt);
403 		vcmn_err(CE_NOTE, fmt, ap);
404 		va_end(ap);
405 	}
406 }
407 #endif
408 
409 /*
410  * device operations
411  */
412 int
413 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
414 {
415 	iwk_sc_t		*sc;
416 	ieee80211com_t	*ic;
417 	int			instance, err, i;
418 	char			strbuf[32];
419 	wifi_data_t		wd = { 0 };
420 	mac_register_t		*macp;
421 
422 	switch (cmd) {
423 	case DDI_ATTACH:
424 		break;
425 	case DDI_RESUME:
426 		sc = ddi_get_soft_state(iwk_soft_state_p,
427 		    ddi_get_instance(dip));
428 		ASSERT(sc != NULL);
429 		mutex_enter(&sc->sc_glock);
430 		sc->sc_flags &= ~IWK_F_SUSPEND;
431 		mutex_exit(&sc->sc_glock);
432 		if (sc->sc_flags & IWK_F_RUNNING) {
433 			(void) iwk_init(sc);
434 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
435 		}
436 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
437 		return (DDI_SUCCESS);
438 	default:
439 		err = DDI_FAILURE;
440 		goto attach_fail1;
441 	}
442 
443 	instance = ddi_get_instance(dip);
444 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
445 	if (err != DDI_SUCCESS) {
446 		cmn_err(CE_WARN,
447 		    "iwk_attach(): failed to allocate soft state\n");
448 		goto attach_fail1;
449 	}
450 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
451 	sc->sc_dip = dip;
452 
453 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
454 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
455 	if (err != DDI_SUCCESS) {
456 		cmn_err(CE_WARN,
457 		    "iwk_attach(): failed to map config spaces regs\n");
458 		goto attach_fail2;
459 	}
460 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
461 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
462 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
463 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
464 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
465 	if (!sc->sc_clsz)
466 		sc->sc_clsz = 16;
467 	sc->sc_clsz = (sc->sc_clsz << 2);
468 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
469 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
470 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
471 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
472 	/*
473 	 * Map operating registers
474 	 */
475 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
476 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
477 	if (err != DDI_SUCCESS) {
478 		cmn_err(CE_WARN,
479 		    "iwk_attach(): failed to map device regs\n");
480 		goto attach_fail2a;
481 	}
482 
483 	/*
484 	 * Initialize mutexs and condvars
485 	 */
486 	err = ddi_get_iblock_cookie(dip, 0, &sc->sc_iblk);
487 	if (err != DDI_SUCCESS) {
488 		cmn_err(CE_WARN,
489 		    "iwk_attach(): failed to do ddi_get_iblock_cookie()\n");
490 		goto attach_fail2b;
491 	}
492 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER, sc->sc_iblk);
493 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER, sc->sc_iblk);
494 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
495 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
496 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
497 	/*
498 	 * initialize the mfthread
499 	 */
500 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
501 	    (void *) sc->sc_iblk);
502 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
503 	sc->sc_mf_thread = NULL;
504 	sc->sc_mf_thread_switch = 0;
505 
506 	/*
507 	 * Allocate shared page.
508 	 */
509 	err = iwk_alloc_shared(sc);
510 	if (err != DDI_SUCCESS) {
511 		cmn_err(CE_WARN, "failed to allocate shared page\n");
512 		goto attach_fail3;
513 	}
514 
515 	/*
516 	 * Allocate keep warm page.
517 	 */
518 	err = iwk_alloc_kw(sc);
519 	if (err != DDI_SUCCESS) {
520 		cmn_err(CE_WARN, "failed to allocate keep warm page\n");
521 		goto attach_fail3a;
522 	}
523 
524 	/*
525 	 * Do some necessary hardware initializations.
526 	 */
527 	err = iwk_preinit(sc);
528 	if (err != DDI_SUCCESS) {
529 		cmn_err(CE_WARN, "failed to init hardware\n");
530 		goto attach_fail4;
531 	}
532 
533 	/* initialize EEPROM */
534 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
535 	if (err != 0) {
536 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
537 		goto attach_fail4;
538 	}
539 
540 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
541 		IWK_DBG((IWK_DEBUG_EEPROM, "older EEPROM detected"));
542 		goto attach_fail4;
543 	}
544 
545 	iwk_get_mac_from_eep(sc);
546 
547 	err = iwk_ring_init(sc);
548 	if (err != DDI_SUCCESS) {
549 		cmn_err(CE_WARN, "iwk_attach(): "
550 		    "failed to allocate and initialize ring\n");
551 		goto attach_fail4;
552 	}
553 
554 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
555 
556 	err = iwk_alloc_fw_dma(sc);
557 	if (err != DDI_SUCCESS) {
558 		cmn_err(CE_WARN, "iwk_attach(): "
559 		    "failed to allocate firmware dma\n");
560 		goto attach_fail5;
561 	}
562 
563 	/*
564 	 * Initialize the wifi part, which will be used by
565 	 * generic layer
566 	 */
567 	ic = &sc->sc_ic;
568 	ic->ic_phytype  = IEEE80211_T_OFDM;
569 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
570 	ic->ic_state    = IEEE80211_S_INIT;
571 	ic->ic_maxrssi  = 100; /* experimental number */
572 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
573 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
574 	/*
575 	 * use software WEP and TKIP, hardware CCMP;
576 	 */
577 	ic->ic_caps |= IEEE80211_C_AES_CCM;
578 	/*
579 	 * Support WPA/WPA2
580 	 */
581 	ic->ic_caps |= IEEE80211_C_WPA;
582 	/* set supported .11b and .11g rates */
583 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
584 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
585 
586 	/* set supported .11b and .11g channels (1 through 14) */
587 	for (i = 1; i <= 14; i++) {
588 		ic->ic_sup_channels[i].ich_freq =
589 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
590 		ic->ic_sup_channels[i].ich_flags =
591 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
592 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
593 	}
594 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
595 	ic->ic_xmit = iwk_send;
596 	/*
597 	 * init Wifi layer
598 	 */
599 	ieee80211_attach(ic);
600 
601 	/*
602 	 * different instance has different WPA door
603 	 */
604 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
605 	    ddi_driver_name(dip),
606 	    ddi_get_instance(dip));
607 
608 	/*
609 	 * Override 80211 default routines
610 	 */
611 	sc->sc_newstate = ic->ic_newstate;
612 	ic->ic_newstate = iwk_newstate;
613 	ic->ic_node_alloc = iwk_node_alloc;
614 	ic->ic_node_free = iwk_node_free;
615 	ic->ic_crypto.cs_key_set = iwk_key_set;
616 	ieee80211_media_init(ic);
617 	/*
618 	 * initialize default tx key
619 	 */
620 	ic->ic_def_txkey = 0;
621 
622 	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW,
623 	    &sc->sc_rx_softint_id, &sc->sc_iblk, NULL, iwk_rx_softintr,
624 	    (caddr_t)sc);
625 	if (err != DDI_SUCCESS) {
626 		cmn_err(CE_WARN,
627 		    "iwk_attach(): failed to do ddi_add_softintr()\n");
628 		goto attach_fail7;
629 	}
630 
631 	/*
632 	 * Add the interrupt handler
633 	 */
634 	err = ddi_add_intr(dip, 0, &sc->sc_iblk, NULL,
635 	    iwk_intr, (caddr_t)sc);
636 	if (err != DDI_SUCCESS) {
637 		cmn_err(CE_WARN,
638 		    "iwk_attach(): failed to do ddi_add_intr()\n");
639 		goto attach_fail8;
640 	}
641 
642 	/*
643 	 * Initialize pointer to device specific functions
644 	 */
645 	wd.wd_secalloc = WIFI_SEC_NONE;
646 	wd.wd_opmode = ic->ic_opmode;
647 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
648 
649 	macp = mac_alloc(MAC_VERSION);
650 	if (err != DDI_SUCCESS) {
651 		cmn_err(CE_WARN,
652 		    "iwk_attach(): failed to do mac_alloc()\n");
653 		goto attach_fail9;
654 	}
655 
656 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
657 	macp->m_driver		= sc;
658 	macp->m_dip		= dip;
659 	macp->m_src_addr	= ic->ic_macaddr;
660 	macp->m_callbacks	= &iwk_m_callbacks;
661 	macp->m_min_sdu		= 0;
662 	macp->m_max_sdu		= IEEE80211_MTU;
663 	macp->m_pdata		= &wd;
664 	macp->m_pdata_size	= sizeof (wd);
665 
666 	/*
667 	 * Register the macp to mac
668 	 */
669 	err = mac_register(macp, &ic->ic_mach);
670 	mac_free(macp);
671 	if (err != DDI_SUCCESS) {
672 		cmn_err(CE_WARN,
673 		    "iwk_attach(): failed to do mac_register()\n");
674 		goto attach_fail9;
675 	}
676 
677 	/*
678 	 * Create minor node of type DDI_NT_NET_WIFI
679 	 */
680 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
681 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
682 	    instance + 1, DDI_NT_NET_WIFI, 0);
683 	if (err != DDI_SUCCESS)
684 		cmn_err(CE_WARN,
685 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
686 
687 	/*
688 	 * Notify link is down now
689 	 */
690 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
691 
692 	/*
693 	 * create the mf thread to handle the link status,
694 	 * recovery fatal error, etc.
695 	 */
696 
697 	sc->sc_mf_thread_switch = 1;
698 	if (sc->sc_mf_thread == NULL)
699 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
700 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
701 
702 	sc->sc_flags |= IWK_F_ATTACHED;
703 
704 	return (DDI_SUCCESS);
705 attach_fail9:
706 	ddi_remove_intr(dip, 0, sc->sc_iblk);
707 attach_fail8:
708 	ddi_remove_softintr(sc->sc_rx_softint_id);
709 	sc->sc_rx_softint_id = NULL;
710 attach_fail7:
711 	ieee80211_detach(ic);
712 attach_fail6:
713 	iwk_free_fw_dma(sc);
714 attach_fail5:
715 	iwk_ring_free(sc);
716 attach_fail4:
717 	iwk_free_kw(sc);
718 attach_fail3a:
719 	iwk_free_shared(sc);
720 attach_fail3:
721 	iwk_destroy_locks(sc);
722 attach_fail2b:
723 	ddi_regs_map_free(&sc->sc_handle);
724 attach_fail2a:
725 	ddi_regs_map_free(&sc->sc_cfg_handle);
726 attach_fail2:
727 	ddi_soft_state_free(iwk_soft_state_p, instance);
728 attach_fail1:
729 	return (err);
730 }
731 
/*
 * Device detach/suspend entry point.
 *
 * DDI_SUSPEND: stop the hardware if it is running and mark the
 * soft state suspended; resources are kept for DDI_RESUME.
 *
 * DDI_DETACH: stop the monitor thread, quiesce the hardware,
 * unregister from the MAC layer, and release all DMA, interrupt,
 * net80211, lock and register-mapping resources in the reverse
 * order they were set up in iwk_attach().
 */
int
iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwk_sc_t	*sc;
	int err;

	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		if (sc->sc_flags & IWK_F_RUNNING) {
			iwk_stop(sc);
		}
		mutex_enter(&sc->sc_glock);
		sc->sc_flags |= IWK_F_SUSPEND;
		mutex_exit(&sc->sc_glock);
		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/* nothing to tear down if attach never completed */
	if (!(sc->sc_flags & IWK_F_ATTACHED))
		return (DDI_FAILURE);

	/*
	 * Destroy the mf_thread: clear its run switch and wait for it to
	 * clear sc_mf_thread on exit.  cv_wait_sig() returns 0 when
	 * interrupted by a signal, in which case we stop waiting.
	 */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread_switch = 0;
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
			break;
	}
	mutex_exit(&sc->sc_mt_lock);

	iwk_stop(sc);
	DELAY(500000);	/* wait 0.5 sec for outstanding activity to drain */

	/*
	 * Unregister from the MAC layer subsystem.
	 * NOTE(review): if mac_unregister() fails, detach aborts here but
	 * the mf thread has already been torn down and is not restarted —
	 * confirm this is acceptable for a busy/held device.
	 */
	err = mac_unregister(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS)
		return (err);

	/* release all DMA areas under the general lock */
	mutex_enter(&sc->sc_glock);
	iwk_free_fw_dma(sc);
	iwk_ring_free(sc);
	iwk_free_kw(sc);
	iwk_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	ddi_remove_intr(dip, 0, sc->sc_iblk);
	ddi_remove_softintr(sc->sc_rx_softint_id);
	sc->sc_rx_softint_id = NULL;

	/*
	 * detach ieee80211
	 */
	ieee80211_detach(&sc->sc_ic);

	iwk_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
806 
/*
 * Destroy all mutexes and condition variables created in iwk_attach(),
 * in reverse order of their initialization.
 */
static void
iwk_destroy_locks(iwk_sc_t *sc)
{
	cv_destroy(&sc->sc_mt_cv);
	mutex_destroy(&sc->sc_mt_lock);
	cv_destroy(&sc->sc_tx_cv);
	cv_destroy(&sc->sc_cmd_cv);
	cv_destroy(&sc->sc_fw_cv);
	mutex_destroy(&sc->sc_tx_lock);
	mutex_destroy(&sc->sc_glock);
}
818 
/*
 * Allocate an area of memory and a DMA handle for accessing it.
 *
 * Standard three-step DDI DMA setup: allocate a DMA handle, allocate
 * memory satisfying the given attributes, then bind the two together.
 * The resulting virtual address, cookie and cookie count are recorded
 * in *dma_p.  On any failure the handles obtained so far are released
 * and NULLed, so a later iwk_free_dma_mem() on the same iwk_dma_t is
 * a safe no-op.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
    ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
    uint_t dma_flags, iwk_dma_t *dma_p)
{
	caddr_t vaddr;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory (only the consistent/streaming bits of
	 * dma_flags are meaningful here)
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/* bookkeeping fields not used by this allocation scheme */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
874 
875 /*
876  * Free one allocated area of DMAable memory
877  */
878 static void
879 iwk_free_dma_mem(iwk_dma_t *dma_p)
880 {
881 	if (dma_p->dma_hdl != NULL) {
882 		if (dma_p->ncookies) {
883 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
884 			dma_p->ncookies = 0;
885 		}
886 		ddi_dma_free_handle(&dma_p->dma_hdl);
887 		dma_p->dma_hdl = NULL;
888 	}
889 
890 	if (dma_p->acc_hdl != NULL) {
891 		ddi_dma_mem_free(&dma_p->acc_hdl);
892 		dma_p->acc_hdl = NULL;
893 	}
894 }
895 
896 /*
897  *
898  */
899 static int
900 iwk_alloc_fw_dma(iwk_sc_t *sc)
901 {
902 	int err = DDI_SUCCESS;
903 	iwk_dma_t *dma_p;
904 	char *t;
905 
906 	/*
907 	 * firmware image layout:
908 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
909 	 */
910 	t = (char *)(sc->sc_hdr + 1);
911 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
912 	    &fw_dma_attr, &iwk_dma_accattr,
913 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
914 	    &sc->sc_dma_fw_text);
915 	dma_p = &sc->sc_dma_fw_text;
916 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
917 	    dma_p->ncookies, dma_p->cookie.dmac_address,
918 	    dma_p->cookie.dmac_size));
919 	if (err != DDI_SUCCESS) {
920 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
921 		    " text dma memory");
922 		goto fail;
923 	}
924 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
925 
926 	t += LE_32(sc->sc_hdr->textsz);
927 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
928 	    &fw_dma_attr, &iwk_dma_accattr,
929 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
930 	    &sc->sc_dma_fw_data);
931 	dma_p = &sc->sc_dma_fw_data;
932 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
933 	    dma_p->ncookies, dma_p->cookie.dmac_address,
934 	    dma_p->cookie.dmac_size));
935 	if (err != DDI_SUCCESS) {
936 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
937 		    " data dma memory");
938 		goto fail;
939 	}
940 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
941 
942 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
943 	    &fw_dma_attr, &iwk_dma_accattr,
944 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
945 	    &sc->sc_dma_fw_data_bak);
946 	dma_p = &sc->sc_dma_fw_data_bak;
947 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
948 	    "size:%lx]\n",
949 	    dma_p->ncookies, dma_p->cookie.dmac_address,
950 	    dma_p->cookie.dmac_size));
951 	if (err != DDI_SUCCESS) {
952 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
953 		    " data bakeup dma memory");
954 		goto fail;
955 	}
956 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
957 
958 	t += LE_32(sc->sc_hdr->datasz);
959 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
960 	    &fw_dma_attr, &iwk_dma_accattr,
961 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
962 	    &sc->sc_dma_fw_init_text);
963 	dma_p = &sc->sc_dma_fw_init_text;
964 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
965 	    "size:%lx]\n",
966 	    dma_p->ncookies, dma_p->cookie.dmac_address,
967 	    dma_p->cookie.dmac_size));
968 	if (err != DDI_SUCCESS) {
969 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
970 		    "init text dma memory");
971 		goto fail;
972 	}
973 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
974 
975 	t += LE_32(sc->sc_hdr->init_textsz);
976 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
977 	    &fw_dma_attr, &iwk_dma_accattr,
978 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
979 	    &sc->sc_dma_fw_init_data);
980 	dma_p = &sc->sc_dma_fw_init_data;
981 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
982 	    "size:%lx]\n",
983 	    dma_p->ncookies, dma_p->cookie.dmac_address,
984 	    dma_p->cookie.dmac_size));
985 	if (err != DDI_SUCCESS) {
986 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
987 		    "init data dma memory");
988 		goto fail;
989 	}
990 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
991 
992 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
993 fail:
994 	return (err);
995 }
996 
997 static void
998 iwk_free_fw_dma(iwk_sc_t *sc)
999 {
1000 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1001 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1002 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1003 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1004 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1005 }
1006 
1007 /*
1008  * Allocate a shared page between host and NIC.
1009  */
1010 static int
1011 iwk_alloc_shared(iwk_sc_t *sc)
1012 {
1013 	iwk_dma_t *dma_p;
1014 	int err = DDI_SUCCESS;
1015 
1016 	/* must be aligned on a 4K-page boundary */
1017 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1018 	    &sh_dma_attr, &iwk_dma_accattr,
1019 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1020 	    &sc->sc_dma_sh);
1021 	if (err != DDI_SUCCESS)
1022 		goto fail;
1023 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1024 
1025 	dma_p = &sc->sc_dma_sh;
1026 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1027 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1028 	    dma_p->cookie.dmac_size));
1029 
1030 	return (err);
1031 fail:
1032 	iwk_free_shared(sc);
1033 	return (err);
1034 }
1035 
/* Release the shared-page DMA resources (no-op if never allocated). */
static void
iwk_free_shared(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_sh);
}
1041 
1042 /*
1043  * Allocate a keep warm page.
1044  */
1045 static int
1046 iwk_alloc_kw(iwk_sc_t *sc)
1047 {
1048 	iwk_dma_t *dma_p;
1049 	int err = DDI_SUCCESS;
1050 
1051 	/* must be aligned on a 4K-page boundary */
1052 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1053 	    &kw_dma_attr, &iwk_dma_accattr,
1054 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1055 	    &sc->sc_dma_kw);
1056 	if (err != DDI_SUCCESS)
1057 		goto fail;
1058 
1059 	dma_p = &sc->sc_dma_kw;
1060 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1061 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1062 	    dma_p->cookie.dmac_size));
1063 
1064 	return (err);
1065 fail:
1066 	iwk_free_kw(sc);
1067 	return (err);
1068 }
1069 
/*
 * Release the keep-warm page allocated by iwk_alloc_kw().
 */
static void
iwk_free_kw(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_kw);
}
1075 
/*
 * Allocate the Rx ring: a device-shared array of RX_QUEUE_SIZE 32-bit
 * buffer pointers, plus the Rx data buffers those entries point at.
 * On failure everything partially allocated is released through
 * iwk_free_rx_ring() and the DDI error code is returned.
 */
static int
iwk_alloc_rx_ring(iwk_sc_t *sc)
{
	iwk_rx_ring_t *ring;
	iwk_rx_data_t *data;
	iwk_dma_t *dma_p;
	int i, err = DDI_SUCCESS;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/* descriptor array: one 32-bit buffer pointer per queue entry */
	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}
	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
	dma_p = &ring->dma_desc;
	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwk_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = LE_32((uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8));
	}
	dma_p = &ring->data[0].dma_data;
	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* hand the filled descriptor array to the device */
	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwk_free_rx_ring(sc);
	return (err);
}
1137 
/*
 * Stop Rx DMA and reset the driver's Rx queue read index.
 */
static void
iwk_reset_rx_ring(iwk_sc_t *sc)
{
	int n;

	iwk_mac_access_enter(sc);
	/* writing 0 to the channel-0 Rx config register stops Rx DMA */
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/*
	 * Poll (up to ~2s) for the Rx engine to report idle.
	 * NOTE(review): bit 24 is presumably the channel-0 idle flag --
	 * confirm against the 4965 FH register documentation.
	 */
	for (n = 0; n < 2000; n++) {
		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
			break;
		DELAY(1000);
	}
#ifdef DEBUG
	if (n == 2000)
		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
#endif
	iwk_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
1158 
1159 static void
1160 iwk_free_rx_ring(iwk_sc_t *sc)
1161 {
1162 	int i;
1163 
1164 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1165 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1166 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1167 			    DDI_DMA_SYNC_FORCPU);
1168 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1169 	}
1170 
1171 	if (sc->sc_rxq.dma_desc.dma_hdl)
1172 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1173 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1174 }
1175 
1176 static int
1177 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1178     int slots, int qid)
1179 {
1180 	iwk_tx_data_t *data;
1181 	iwk_tx_desc_t *desc_h;
1182 	uint32_t paddr_desc_h;
1183 	iwk_cmd_t *cmd_h;
1184 	uint32_t paddr_cmd_h;
1185 	iwk_dma_t *dma_p;
1186 	int i, err = DDI_SUCCESS;
1187 
1188 	ring->qid = qid;
1189 	ring->count = TFD_QUEUE_SIZE_MAX;
1190 	ring->window = slots;
1191 	ring->queued = 0;
1192 	ring->cur = 0;
1193 
1194 	err = iwk_alloc_dma_mem(sc,
1195 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1196 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1197 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1198 	    &ring->dma_desc);
1199 	if (err != DDI_SUCCESS) {
1200 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring desc[%d]"
1201 		    " failed\n", qid));
1202 		goto fail;
1203 	}
1204 	dma_p = &ring->dma_desc;
1205 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1206 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1207 	    dma_p->cookie.dmac_size));
1208 
1209 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1210 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1211 
1212 	err = iwk_alloc_dma_mem(sc,
1213 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1214 	    &cmd_dma_attr, &iwk_dma_accattr,
1215 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1216 	    &ring->dma_cmd);
1217 	if (err != DDI_SUCCESS) {
1218 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring cmd[%d]"
1219 		    " failed\n", qid));
1220 		goto fail;
1221 	}
1222 	dma_p = &ring->dma_cmd;
1223 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1224 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1225 	    dma_p->cookie.dmac_size));
1226 
1227 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1228 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1229 
1230 	/*
1231 	 * Allocate Tx buffers.
1232 	 */
1233 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1234 	    KM_NOSLEEP);
1235 	if (ring->data == NULL) {
1236 		IWK_DBG((IWK_DEBUG_DMA, "could not allocate "
1237 		    "tx data slots\n"));
1238 		goto fail;
1239 	}
1240 
1241 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1242 		data = &ring->data[i];
1243 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1244 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1245 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1246 		    &data->dma_data);
1247 		if (err != DDI_SUCCESS) {
1248 			IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx "
1249 			    "ring buf[%d] failed\n", i));
1250 			goto fail;
1251 		}
1252 
1253 		data->desc = desc_h + i;
1254 		data->paddr_desc = paddr_desc_h +
1255 		    ((caddr_t)data->desc - (caddr_t)desc_h);
1256 		data->cmd = cmd_h +  i; /* (i % slots); */
1257 		data->paddr_cmd = paddr_cmd_h +
1258 		    ((caddr_t)data->cmd - (caddr_t)cmd_h);
1259 		    /* ((i % slots) * sizeof (iwk_cmd_t)); */
1260 	}
1261 	dma_p = &ring->data[0].dma_data;
1262 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1263 	    "size:%lx]\n",
1264 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1265 	    dma_p->cookie.dmac_size));
1266 
1267 	return (err);
1268 
1269 fail:
1270 	if (ring->data)
1271 		kmem_free(ring->data,
1272 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1273 	iwk_free_tx_ring(sc, ring);
1274 	return (err);
1275 }
1276 
/*
 * Quiesce one Tx DMA channel and reset the ring's software indices.
 */
static void
iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	iwk_tx_data_t *data;
	int i, n;

	iwk_mac_access_enter(sc);

	/* writing 0 to the channel config register stops the Tx DMA */
	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	/* poll (up to ~2ms) for the channel to report idle */
	for (n = 0; n < 200; n++) {
		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
			break;
		DELAY(10);
	}
#ifdef DEBUG
	if (n == 200 && iwk_dbg_flags > 0) {
		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif
	iwk_mac_access_exit(sc);

	/* flush any pending CPU writes in the data buffers to the device */
	for (i = 0; i < ring->count; i++) {
		data = &ring->data[i];
		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	}

	ring->queued = 0;
	ring->cur = 0;
}
1308 
/*
 * Release every DMA resource of a Tx ring: descriptor array, command
 * buffers, and the per-slot data buffers.  Safe to call on a
 * partially-initialized ring -- each piece is checked before use.
 */
/*ARGSUSED*/
static void
iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	int i;

	if (ring->dma_desc.dma_hdl != NULL)
		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	iwk_free_dma_mem(&ring->dma_desc);

	if (ring->dma_cmd.dma_hdl != NULL)
		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
	iwk_free_dma_mem(&ring->dma_cmd);

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			if (ring->data[i].dma_data.dma_hdl)
				IWK_DMA_SYNC(ring->data[i].dma_data,
				    DDI_DMA_SYNC_FORDEV);
			iwk_free_dma_mem(&ring->data[i].dma_data);
		}
		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
	}
}
1333 
1334 static int
1335 iwk_ring_init(iwk_sc_t *sc)
1336 {
1337 	int i, err = DDI_SUCCESS;
1338 
1339 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1340 		if (i == IWK_CMD_QUEUE_NUM)
1341 			continue;
1342 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1343 		    i);
1344 		if (err != DDI_SUCCESS)
1345 			goto fail;
1346 	}
1347 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1348 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1349 	if (err != DDI_SUCCESS)
1350 		goto fail;
1351 	err = iwk_alloc_rx_ring(sc);
1352 	if (err != DDI_SUCCESS)
1353 		goto fail;
1354 	return (err);
1355 
1356 fail:
1357 	return (err);
1358 }
1359 
1360 static void
1361 iwk_ring_free(iwk_sc_t *sc)
1362 {
1363 	int i = IWK_NUM_QUEUES;
1364 
1365 	iwk_free_rx_ring(sc);
1366 	while (--i >= 0) {
1367 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1368 	}
1369 }
1370 
1371 /* ARGSUSED */
1372 static ieee80211_node_t *
1373 iwk_node_alloc(ieee80211com_t *ic)
1374 {
1375 	iwk_amrr_t *amrr;
1376 
1377 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1378 	if (amrr != NULL)
1379 		iwk_amrr_init(amrr);
1380 	return (&amrr->in);
1381 }
1382 
/*
 * net80211 node-free hook: run the generic cleanup, drop the node's
 * WPA IE buffer, then free the enclosing iwk_amrr_t (the node was
 * allocated as part of one in iwk_node_alloc()).
 */
static void
iwk_node_free(ieee80211_node_t *in)
{
	ieee80211com_t *ic = in->in_ic;

	/* generic cleanup must run before the memory is released */
	ic->ic_node_cleanup(in);
	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);
	kmem_free(in, sizeof (iwk_amrr_t));
}
1393 
1394 /*ARGSUSED*/
1395 static int
1396 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1397 {
1398 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1399 	ieee80211_node_t *in = ic->ic_bss;
1400 	iwk_tx_power_table_cmd_t txpower;
1401 	enum ieee80211_state ostate = ic->ic_state;
1402 	int i, err = IWK_SUCCESS;
1403 
1404 	mutex_enter(&sc->sc_glock);
1405 	switch (nstate) {
1406 	case IEEE80211_S_SCAN:
1407 		if (ostate == IEEE80211_S_INIT) {
1408 			ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
1409 			/* let LED blink when scanning */
1410 			iwk_set_led(sc, 2, 10, 2);
1411 
1412 			if ((err = iwk_scan(sc)) != 0) {
1413 				IWK_DBG((IWK_DEBUG_80211,
1414 				    "could not initiate scan\n"));
1415 				ic->ic_flags &= ~(IEEE80211_F_SCAN |
1416 				    IEEE80211_F_ASCAN);
1417 				mutex_exit(&sc->sc_glock);
1418 				return (err);
1419 			}
1420 		}
1421 		ic->ic_state = nstate;
1422 		sc->sc_clk = 0;
1423 		mutex_exit(&sc->sc_glock);
1424 		return (IWK_SUCCESS);
1425 
1426 	case IEEE80211_S_AUTH:
1427 		/* reset state to handle reassociations correctly */
1428 		sc->sc_config.assoc_id = 0;
1429 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1430 
1431 		/*
1432 		 * before sending authentication and association request frame,
1433 		 * we need do something in the hardware, such as setting the
1434 		 * channel same to the target AP...
1435 		 */
1436 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1437 			IWK_DBG((IWK_DEBUG_80211,
1438 			    "could not send authentication request\n"));
1439 			mutex_exit(&sc->sc_glock);
1440 			return (err);
1441 		}
1442 		break;
1443 
1444 	case IEEE80211_S_RUN:
1445 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1446 			/* let LED blink when monitoring */
1447 			iwk_set_led(sc, 2, 10, 10);
1448 			break;
1449 		}
1450 
1451 		if (ic->ic_opmode != IEEE80211_M_STA) {
1452 			(void) iwk_hw_set_before_auth(sc);
1453 			/* need setup beacon here */
1454 		}
1455 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1456 
1457 		/* update adapter's configuration */
1458 		sc->sc_config.assoc_id = sc->sc_assoc_id & 0x3fff;
1459 		/* short preamble/slot time are negotiated when associating */
1460 		sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
1461 		    RXON_FLG_SHORT_SLOT_MSK);
1462 
1463 		if (ic->ic_flags & IEEE80211_F_SHSLOT)
1464 			sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
1465 
1466 		if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
1467 			sc->sc_config.flags |=
1468 			    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
1469 
1470 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ASSOC_MSK);
1471 
1472 		if (ic->ic_opmode != IEEE80211_M_STA)
1473 			sc->sc_config.filter_flags |=
1474 			    LE_32(RXON_FILTER_BCON_AWARE_MSK);
1475 
1476 		IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
1477 		    " filter_flags %x\n",
1478 		    sc->sc_config.chan, sc->sc_config.flags,
1479 		    sc->sc_config.filter_flags));
1480 		err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1481 		    sizeof (iwk_rxon_cmd_t), 1);
1482 		if (err != IWK_SUCCESS) {
1483 			IWK_DBG((IWK_DEBUG_80211,
1484 			    "could not update configuration\n"));
1485 			mutex_exit(&sc->sc_glock);
1486 			return (err);
1487 		}
1488 
1489 		/*
1490 		 * set Tx power for 2.4GHz channels
1491 		 * (need further investigation. fix tx power at present)
1492 		 * This cmd should be issued each time the reply_rxon cmd is
1493 		 * invoked.
1494 		 */
1495 		(void) memset(&txpower, 0, sizeof (txpower));
1496 		txpower.band = 1; /* for 2.4G */
1497 		txpower.channel = sc->sc_config.chan;
1498 		txpower.channel_normal_width = 0;
1499 		for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
1500 			txpower.tx_power.ht_ofdm_power[i].s.ramon_tx_gain =
1501 			    0x3f3f;
1502 			txpower.tx_power.ht_ofdm_power[i].s.dsp_predis_atten =
1503 			    110 | (110 << 8);
1504 		}
1505 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
1506 		    .s.ramon_tx_gain = 0x3f3f;
1507 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
1508 		    .s.dsp_predis_atten = 110 | (110 << 8);
1509 		err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
1510 		    sizeof (txpower), 1);
1511 		if (err != IWK_SUCCESS) {
1512 			cmn_err(CE_WARN, "iwk_newstate(): failed to "
1513 			    "set txpower\n");
1514 			return (err);
1515 		}
1516 
1517 		/* start automatic rate control */
1518 		mutex_enter(&sc->sc_mt_lock);
1519 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1520 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1521 			/* set rate to some reasonable initial value */
1522 			i = in->in_rates.ir_nrates - 1;
1523 			while (i > 0 && IEEE80211_RATE(i) > 72)
1524 				i--;
1525 			in->in_txrate = i;
1526 		} else {
1527 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1528 		}
1529 		mutex_exit(&sc->sc_mt_lock);
1530 
1531 		/* set LED on after associated */
1532 		iwk_set_led(sc, 2, 0, 1);
1533 		break;
1534 
1535 	case IEEE80211_S_INIT:
1536 		/* set LED off after init */
1537 		iwk_set_led(sc, 2, 1, 0);
1538 		break;
1539 	case IEEE80211_S_ASSOC:
1540 		break;
1541 	}
1542 
1543 	mutex_exit(&sc->sc_glock);
1544 	return (sc->sc_newstate(ic, nstate, arg));
1545 }
1546 
/*
 * net80211 key-install hook.  Hardware handles AES-CCM only; WEP and
 * TKIP are left to the software crypto layer (return 1 without
 * touching the device).  Returns 1 on success, 0 on failure.
 *
 * NOTE(review): sc_config.filter_flags is modified here without LE_32
 * byte-swapping and before sc_glock is taken, while other updates of
 * the same field (e.g. in iwk_newstate) are LE_32-wrapped and done
 * under the lock -- verify both points.
 */
/*ARGSUSED*/
static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	iwk_add_sta_t node;
	int err;

	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
	case IEEE80211_CIPHER_TKIP:
		return (1); /* software crypto handles these ciphers */
	case IEEE80211_CIPHER_AES_CCM:
		break;
	default:
		return (0);
	}
	/* allow the hardware to decrypt unicast and group frames */
	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK
	    | RXON_FILTER_DIS_GRP_DECRYPT_MSK);

	mutex_enter(&sc->sc_glock);

	/* update ap/multicast node */
	(void) memset(&node, 0, sizeof (node));
	if (IEEE80211_IS_MULTICAST(mac)) {
		/* group key: install on the broadcast station entry */
		(void) memset(node.bssid, 0xff, 6);
		node.id = IWK_BROADCAST_ID;
	} else {
		/* pairwise key: install on the AP station entry */
		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
		node.id = IWK_AP_ID;
	}
	if (k->wk_flags & IEEE80211_KEY_XMIT) {
		node.key_flags = 0;
		node.keyp = k->wk_keyix;
	} else {
		/*
		 * rx-only key: flag bit 14 and key slots offset by 4 --
		 * presumably the hardware's group-key convention; confirm
		 * against the 4965 ADD_STA command documentation.
		 */
		node.key_flags = (1 << 14);
		node.keyp = k->wk_keyix + 4;
	}
	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
	node.sta_mask = STA_MODIFY_KEY_MASK;
	node.control = 1;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_key_set():"
		    "failed to update ap node\n");
		mutex_exit(&sc->sc_glock);
		return (0);
	}
	mutex_exit(&sc->sc_glock);
	return (1);
}
1599 
1600 /*
1601  * exclusive access to mac begin.
1602  */
1603 static void
1604 iwk_mac_access_enter(iwk_sc_t *sc)
1605 {
1606 	uint32_t tmp;
1607 	int n;
1608 
1609 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1610 	IWK_WRITE(sc, CSR_GP_CNTRL,
1611 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1612 
1613 	/* wait until we succeed */
1614 	for (n = 0; n < 1000; n++) {
1615 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1616 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1617 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1618 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1619 			break;
1620 		DELAY(10);
1621 	}
1622 	if (n == 1000)
1623 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1624 }
1625 
1626 /*
1627  * exclusive access to mac end.
1628  */
1629 static void
1630 iwk_mac_access_exit(iwk_sc_t *sc)
1631 {
1632 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1633 	IWK_WRITE(sc, CSR_GP_CNTRL,
1634 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1635 }
1636 
1637 /*
1638  * this function defined here for future use.
1639  * static uint32_t
1640  * iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1641  * {
1642  * 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1643  * 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1644  * }
1645  */
1646 
/*
 * Write one 32-bit word to device memory at byte address 'addr'
 * through the indirect HBUS target-memory port (address register
 * first, then data).  Caller must hold exclusive mac access.
 */
static void
iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
1653 
/*
 * Read a peripheral register through the indirect HBUS port.
 * NOTE(review): the (3 << 24) OR'ed into the address appears to be
 * access-control bits required by the HBUS interface (same pattern
 * as iwk_reg_write) -- confirm against the 4965 register spec.
 */
static uint32_t
iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
}
1660 
/*
 * Write a peripheral register through the indirect HBUS port
 * (address register first, then data).  The (3 << 24) bits mirror
 * iwk_reg_read(); see the note there.
 */
static void
iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
1667 
1668 static void
1669 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1670     uint32_t *data, int wlen)
1671 {
1672 	for (; wlen > 0; wlen--, data++, addr += 4)
1673 		iwk_reg_write(sc, addr, *data);
1674 }
1675 
1676 
1677 /*
1678  * ucode load/initialization steps:
1679  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1680  * BSM contains a small memory that *always* stays powered up, so it can
1681  * retain the bootstrap program even when the card is in a power-saving
1682  * power-down state.  The BSM loads the small program into ARC processor's
1683  * instruction memory when triggered by power-up.
1684  * 2)  load Initialize image via bootstrap program.
1685  * The Initialize image sets up regulatory and calibration data for the
1686  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1687  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1688  * correction.
1689  */
1690 static int
1691 iwk_load_firmware(iwk_sc_t *sc)
1692 {
1693 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1694 	uint32_t size = sc->sc_hdr->bootsz;
1695 	int n, err = IWK_SUCCESS;
1696 
1697 	/*
1698 	 * The physical address bit [4-35] of the initialize uCode.
1699 	 * In the initialize alive notify interrupt the physical address of
1700 	 * the runtime ucode will be set for loading.
1701 	 */
1702 	iwk_mac_access_enter(sc);
1703 
1704 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1705 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1706 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1707 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1708 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1709 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1710 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1711 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1712 
1713 	/* load bootstrap code into BSM memory */
1714 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1715 	    size / sizeof (uint32_t));
1716 
1717 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1718 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1719 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1720 
1721 	/*
1722 	 * prepare to load initialize uCode
1723 	 */
1724 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
1725 
1726 	/* wait while the adapter is busy loading the firmware */
1727 	for (n = 0; n < 1000; n++) {
1728 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
1729 		    BSM_WR_CTRL_REG_BIT_START))
1730 			break;
1731 		DELAY(10);
1732 	}
1733 	if (n == 1000) {
1734 		IWK_DBG((IWK_DEBUG_FW,
1735 		    "timeout transferring firmware\n"));
1736 		err = ETIMEDOUT;
1737 		return (err);
1738 	}
1739 
1740 	/* for future power-save mode use */
1741 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
1742 
1743 	iwk_mac_access_exit(sc);
1744 
1745 	return (err);
1746 }
1747 
/*
 * Handle a REPLY_4965_RX notification: parse the PHY result that
 * precedes the frame, derive a signal-strength percentage, validate
 * frame length and CRC status, and pass the frame up to net80211.
 */
/*ARGSUSED*/
static void
iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_ring_t *ring = &sc->sc_rxq;
	iwk_rx_phy_res_t *stat;
	ieee80211_node_t *in;
	uint32_t *tail;
	struct ieee80211_frame *wh;
	mblk_t *mp;
	uint16_t len, rssi, mrssi, agc;
	int16_t t;
	uint32_t ants, i;
	struct iwk_rx_non_cfg_phy *phyinfo;

	/* assuming not 11n here. cope with 11n in phase-II */
	stat = (iwk_rx_phy_res_t *)(desc + 1);
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/*
	 * take the strongest per-antenna RSSI among the antennas the
	 * PHY flags report as active
	 */
	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
	mrssi = 0;
	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
	for (i = 0; i < 3; i++) {
		if (ants & (1 << i))
			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
	}
	t = mrssi - agc - 44; /* t is the dBM value */
	/*
	 * convert dBm to percentage ???
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	if (rssi > 100)
		rssi = 100;
	if (rssi < 1)
		rssi = 1;
	/* the status word ('tail') follows the variable-length payload */
	len = stat->byte_count;
	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);

	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(*tail)));

	/* reject runt frames and frames larger than an Rx buffer */
	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(*tail) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
		    LE_32(*tail)));
		sc->sc_rx_err++;
		return;
	}

	/* frame body starts right after the cfg PHY data */
	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
	/* snoop the association ID out of assoc-response frames */
	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}
#ifdef DEBUG
	if (iwk_dbg_flags & IWK_DEBUG_RX)
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
#endif
	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/* send the frame to the 802.11 layer */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		sc->sc_rx_nobuf++;
		IWK_DBG((IWK_DEBUG_RX,
		    "iwk_rx_intr(): alloc rx buf failed\n"));
	}
	/* release node reference */
	ieee80211_free_node(in);
}
1843 
/*
 * Handle a REPLY_TX notification: record rate-control statistics,
 * credit the ring a free slot, and restart the MAC layer if Tx was
 * previously flow-controlled.
 *
 * NOTE(review): the ring lookup masks qid with 0x3 (queues 0-3 only)
 * while IWK_NUM_QUEUES queues exist -- verify the mask is intended.
 */
/*ARGSUSED*/
static void
iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;

	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
	    " retries=%d frame_count=%x nkill=%d "
	    "rate=%x duration=%d status=%x\n",
	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
	    stat->bt_kill_count, stat->rate.r.s.rate,
	    LE_32(stat->duration), LE_32(stat->status)));

	/* feed the AMRR rate-control statistics */
	amrr->txcnt++;
	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	/* a completion arrived, so the Tx watchdog can be disarmed */
	sc->sc_tx_timer = 0;

	mutex_enter(&sc->sc_tx_lock);
	ring->queued--;
	if (ring->queued < 0)
		ring->queued = 0;
	/*
	 * NOTE(review): (ring->count << 3) is eight times the ring size,
	 * so this condition is always true when a reschedule is pending;
	 * (ring->count >> 3) was probably intended -- verify against the
	 * flow-control threshold used on the send path.
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}
	mutex_exit(&sc->sc_tx_lock);
}
1883 
/*
 * Handle completion of a host command: set IWK_F_CMD_DONE and wake
 * whoever is waiting on sc_cmd_cv (presumably iwk_cmd() -- confirm).
 */
static void
iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
{
	/* only entries from the command queue (qid 4) are of interest */
	if ((desc->hdr.qid & 7) != 4) {
		return;
	}
	mutex_enter(&sc->sc_glock);
	sc->sc_flags |= IWK_F_CMD_DONE;
	cv_signal(&sc->sc_cmd_cv);
	mutex_exit(&sc->sc_glock);
	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
	    "qid=%x idx=%d flags=%x type=0x%x\n",
	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
	    desc->hdr.type));
}
1899 
/*
 * Handle a REPLY_ALIVE notification from the microcontroller.  For
 * the "initialize" image, save the calibration data and point the
 * BSM at the runtime image; for the runtime image, initialize the
 * SCD (scheduler) and the Tx queues, then wake the thread waiting
 * on sc_fw_cv.
 */
static void
iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
{
	uint32_t base, i;
	struct iwk_alive_resp *ar =
	    (struct iwk_alive_resp *)(desc + 1);

	/*
	 * the microcontroller is ready
	 * NOTE(review): ucode_minor is passed twice below; the second
	 * argument presumably should be ar->ucode_major -- verify.
	 */
	IWK_DBG((IWK_DEBUG_FW,
	    "microcode alive notification minor: %x major: %x type:"
	    " %x subtype: %x\n",
	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));

	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
		IWK_DBG((IWK_DEBUG_FW,
		    "microcontroller initialization failed\n"));
	}
	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
		IWK_DBG((IWK_DEBUG_FW,
		    "initialization alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_init, ar,
		    sizeof (struct iwk_init_alive_resp));
		/* XXX get temperature */
		/* point the BSM at the runtime ucode image for loading */
		iwk_mac_access_enter(sc);
		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
		    sc->sc_dma_fw_data.cookie.dmac_size);
		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
		iwk_mac_access_exit(sc);
	} else {
		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
		(void) memcpy(&sc->sc_card_alive_run, ar,
		    sizeof (struct iwk_alive_resp));

		/*
		 * Init SCD related registers to make Tx work. XXX
		 */
		iwk_mac_access_enter(sc);

		/* read sram address of data base */
		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);

		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
		    i < 128; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/*
		 * clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes
		 * NOTE(review): 'i' is carried over from the loop above
		 * (starts at 128), so only offsets 128..252 of this area
		 * are written, and the next loop's bound (32) is already
		 * exceeded, so it never executes -- verify the intended
		 * SCD initialization sequence.
		 */
		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
		    i < 256; i += 4)
			iwk_mem_write(sc, base + i, 0);

		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
			iwk_mem_write(sc, base + i, 0);

		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
		    sc->sc_dma_sh.cookie.dmac_address >> 10);
		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);

		/* initiate the tx queues */
		for (i = 0; i < IWK_NUM_QUEUES; i++) {
			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i),
			    (SCD_WIN_SIZE & 0x7f));
			iwk_mem_write(sc, sc->sc_scd_base +
			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
			    (SCD_FRAME_LIMIT & 0x7f) << 16);
		}
		/* interrupt enable on each queue0-7 */
		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
		    (1 << IWK_NUM_QUEUES) - 1);
		/* enable  each channel 0-7 */
		iwk_reg_write(sc, SCD_TXFACT,
		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
		/*
		 * queue 0-7 maps to FIFO 0-7 and
		 * all queues work under FIFO mode (none-scheduler-ack)
		 */
		for (i = 0; i < 7; i++) {
			iwk_reg_write(sc,
			    SCD_QUEUE_STATUS_BITS(i),
			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
			    SCD_QUEUE_STTS_REG_MSK);
		}
		iwk_mac_access_exit(sc);

		/* wake the thread waiting for firmware initialization */
		sc->sc_flags |= IWK_F_FW_INIT;
		cv_signal(&sc->sc_fw_cv);
	}

}
2000 
/*
 * Soft interrupt handler: drain the Rx notification queue up to the
 * index the firmware last wrote into the shared page, dispatch each
 * entry by type, then tell the firmware how far we have consumed and
 * re-enable hardware interrupts.
 */
static uint_t
iwk_rx_softintr(caddr_t arg)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_desc_t *desc;
	iwk_rx_data_t *data;
	uint32_t index;

	mutex_enter(&sc->sc_glock);
	/* only run when iwk_intr() actually triggered us */
	if (sc->sc_rx_softint_pending != 1) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	mutex_exit(&sc->sc_glock);

	/*
	 * The firmware advances the Rx queue write index in the shared
	 * page; read it and process every entry up to (excluding) it.
	 */
	index = LE_32(sc->sc_shared->val0) & 0xfff;

	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;

		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/* anything that is not a tx completion or PHY data is
		 * the reply to a host command */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
		    (desc->hdr.type != REPLY_TX))
			iwk_cmd_intr(sc, desc);

		switch (desc->hdr.type) {
		case REPLY_4965_RX:
			iwk_rx_intr(sc, desc, data);
			break;

		case REPLY_TX:
			iwk_tx_intr(sc, desc, data);
			break;

		case REPLY_ALIVE:
			iwk_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwk_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE,
				    "iwk: Radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				sc->sc_flags |=
				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
			}
			break;
		}
		case SCAN_START_NOTIFICATION:
		{
			iwk_start_scan_t *scan =
			    (iwk_start_scan_t *)(desc + 1);

			IWK_DBG((IWK_DEBUG_SCAN,
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel currently being scanned */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}
		case SCAN_COMPLETE_NOTIFICATION:
			IWK_DBG((IWK_DEBUG_SCAN, "scan finished\n"));
			ieee80211_end_scan(ic);
			break;
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * Everything received so far has been handled; tell the firmware
	 * how far the driver has consumed the Rx queue.
	 */
	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	mutex_enter(&sc->sc_glock);
	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	sc->sc_rx_softint_pending = 0;
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}
2113 
/*
 * Hard interrupt handler.  Reads and acknowledges the interrupt causes
 * (CSR_INT and the flow-handler status register), then either schedules
 * the rx soft interrupt, starts fatal-error recovery, or simply
 * re-enables interrupts.  Returns DDI_INTR_UNCLAIMED if the interrupt
 * was not ours (or the device is suspended/gone).
 */
static uint_t
iwk_intr(caddr_t arg)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	uint32_t r, rfh;

	mutex_enter(&sc->sc_glock);

	if (sc->sc_flags & IWK_F_SUSPEND) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}

	/* 0xffffffff indicates the device is no longer responding */
	r = IWK_READ(sc, CSR_INT);
	if (r == 0 || r == 0xffffffff) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}

	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));

	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	/* ack interrupts */
	IWK_WRITE(sc, CSR_INT, r);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	/* soft interrupt not registered yet; nothing more to do */
	if (sc->sc_rx_softint_id == NULL) {
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_CLAIMED);
	}

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWK_DBG((IWK_DEBUG_FW, "fatal firmware error\n"));
		mutex_exit(&sc->sc_glock);
		iwk_stop(sc);
		/*
		 * NOTE(review): sc_ostate and sc_flags are updated here
		 * without sc_glock held — looks racy against iwk_thread();
		 * confirm whether this is intentional.
		 */
		sc->sc_ostate = sc->sc_ic.ic_state;
		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
	}

	/* defer rx processing to the soft interrupt */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		sc->sc_rx_softint_pending = 1;
		ddi_trigger_softintr(sc->sc_rx_softint_id);
	}

	if (r & BIT_INT_ALIVE)	{
		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
	}

	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}
2178 
/*
 * Map an 802.11 rate (in units of 500 kb/s) to the 4965 PLCP rate code
 * used in tx command rate_n_flags.  Unknown rates map to 0.
 */
static uint8_t
iwk_rate_to_plcp(int rate)
{
	static const struct {
		int	r_rate;		/* rate in 500 kb/s units */
		uint8_t	r_plcp;		/* hardware PLCP code */
	} rate_map[] = {
		/* CCK rates */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },
		/* OFDM rates */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 }
	};
	int i;

	for (i = 0; i < (int)(sizeof (rate_map) / sizeof (rate_map[0])); i++) {
		if (rate_map[i].r_rate == rate)
			return (rate_map[i].r_plcp);
	}
	return (0);
}
2229 
2230 static mblk_t *
2231 iwk_m_tx(void *arg, mblk_t *mp)
2232 {
2233 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2234 	ieee80211com_t	*ic = &sc->sc_ic;
2235 	mblk_t			*next;
2236 
2237 	if (sc->sc_flags & IWK_F_SUSPEND) {
2238 		freemsgchain(mp);
2239 		return (NULL);
2240 	}
2241 
2242 	if (ic->ic_state != IEEE80211_S_RUN) {
2243 		freemsgchain(mp);
2244 		return (NULL);
2245 	}
2246 
2247 	while (mp != NULL) {
2248 		next = mp->b_next;
2249 		mp->b_next = NULL;
2250 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2251 			mp->b_next = next;
2252 			break;
2253 		}
2254 		mp = next;
2255 	}
2256 	return (mp);
2257 }
2258 
/*
 * Build and queue one frame on tx ring 0.
 *
 * The incoming mblk chain is linearized into a single buffer, encapsulated
 * and optionally encrypted by net80211, then copied into the per-slot tx
 * command (802.11 header) and DMA data buffer (frame body).  Finally the
 * byte-count table is updated and the ring write pointer is kicked.
 *
 * Returns IWK_SUCCESS when the frame was consumed (even if dropped) and
 * IWK_FAIL when the caller should retry later (suspend / ring full); in
 * the failure case data frames are NOT freed so the framework can requeue
 * them.
 */
/* ARGSUSED */
static int
iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	iwk_tx_ring_t *ring;
	iwk_tx_desc_t *desc;
	iwk_tx_data_t *data;
	iwk_cmd_t *cmd;
	iwk_tx_cmd_t *tx;
	ieee80211_node_t *in;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	mblk_t *m, *m0;
	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
	uint16_t masks = 0;

	ring = &sc->sc_txq[0];
	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = data->cmd;
	bzero(desc, sizeof (*desc));
	bzero(cmd, sizeof (*cmd));

	mutex_enter(&sc->sc_tx_lock);
	if (sc->sc_flags & IWK_F_SUSPEND) {
		mutex_exit(&sc->sc_tx_lock);
		/* non-data frames are not requeued by the framework */
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		err = IWK_FAIL;
		goto exit;
	}

	/* keep 64 slots in reserve; ask for a reschedule when low */
	if (ring->queued > ring->count - 64) {
		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
		sc->sc_need_reschedule = 1;
		mutex_exit(&sc->sc_tx_lock);
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		sc->sc_tx_nobuf++;
		err = IWK_FAIL;
		goto exit;
	}
	mutex_exit(&sc->sc_tx_lock);

	hdrlen = sizeof (struct ieee80211_frame);

	/* linearize the mblk chain (+32 slack for crypto expansion) */
	m = allocb(msgdsize(mp) + 32, BPRI_MED);
	if (m == NULL) { /* can not alloc buf, drop this package */
		cmn_err(CE_WARN,
		    "iwk_send(): failed to allocate msgbuf\n");
		freemsg(mp);
		err = IWK_SUCCESS;
		goto exit;
	}
	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
		mblen = MBLKL(m0);
		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
		off += mblen;
	}
	m->b_wptr += off;
	freemsg(mp);

	wh = (struct ieee80211_frame *)m->b_rptr;

	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (in == NULL) {
		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
		freemsg(m);
		sc->sc_tx_err++;
		err = IWK_SUCCESS;
		goto exit;
	}
	(void) ieee80211_encap(ic, m, in);

	cmd->hdr.type = REPLY_TX;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (iwk_tx_cmd_t *)cmd->data;
	tx->tx_flags = 0;

	/* multicast frames are never acknowledged */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
	} else {
		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
	}

	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, m);
		if (k == NULL) {
			freemsg(m);
			sc->sc_tx_err++;
			err = IWK_SUCCESS;
			goto exit;
		}

		/* hardware handles CCMP; other ciphers go out as-is */
		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
			tx->sec_ctl = 2; /* for CCMP */
			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
		}

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)m->b_rptr;
	}

	len = msgdsize(m);

#ifdef DEBUG
	if (iwk_dbg_flags & IWK_DEBUG_TX)
		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
#endif

	/* pickup a rate */
	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/* mgmt frames are sent at 1M */
		rate = in->in_rates.ir_rates[0];
	} else {
		/*
		 * do it here for the software way rate control.
		 * later for rate scaling in hardware.
		 * maybe like the following, for management frame:
		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
		 * for data frame:
		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
		 * rate = in->in_rates.ir_rates[in->in_txrate];
		 * tx->initial_rate_index = 1;
		 *
		 * now the txrate is determined in tx cmd flags, set to the
		 * max value 54M for 11g and 11M for 11b.
		 */

		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
			rate = ic->ic_fixed_rate;
		} else {
			rate = in->in_rates.ir_rates[in->in_txrate];
		}
	}
	rate &= IEEE80211_RATE_VAL;
	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
	    in->in_txrate, in->in_rates.ir_nrates, rate));

	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));

	/*
	 * pad flag when the MAC header is not 4-byte aligned.
	 * NOTE(review): TX_CMD_FLG_MH_PAD_MSK is or'ed without LE_32()
	 * unlike the other flag updates above — confirm whether this is
	 * intentional (it only matters on big-endian hosts).
	 */
	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* retrieve destination node's id */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		tx->sta_id = IWK_BROADCAST_ID;
	} else {
		tx->sta_id = IWK_AP_ID;
	}

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_MGT) {
		/* tell h/w to set timestamp in probe responses */
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);

		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
			tx->timeout.pm_frame_timeout = 3;
		else
			tx->timeout.pm_frame_timeout = 2;
	} else
		tx->timeout.pm_frame_timeout = 0;
	/* rates 1/2/5.5/11M are CCK modulated */
	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
		masks |= RATE_MCS_CCK_MSK;

	masks |= RATE_MCS_ANT_B_MSK;
	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);

	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
	    tx->tx_flags));

	tx->rts_retry_limit = 60;
	tx->data_retry_limit = 15;

	tx->stop_time.life_time  = LE_32(0xffffffff);

	tx->len = LE_16(len);

	tx->dram_lsb_ptr =
	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
	tx->dram_msb_ptr = 0;
	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	/* 802.11 header goes into the tx command, body into the DMA buf */
	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
	m->b_rptr += hdrlen;
	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);

	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
	    ring->qid, ring->cur, len));

	/*
	 * first segment includes the tx cmd plus the 802.11 header,
	 * the second includes the remaining of the 802.11 frame.
	 */
	desc->val0 = LE_32(2 << 24);
	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
	desc->pa[0].val2 =
	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
	    ((len - hdrlen) << 20);
	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));

	mutex_enter(&sc->sc_tx_lock);
	ring->queued++;
	mutex_exit(&sc->sc_tx_lock);

	/* kick ring: update the byte count table, then the write pointer */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].tfd_offset[ring->cur].val
	    = 8 + len;
	if (ring->cur < IWK_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
	}

	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	ring->cur = (ring->cur + 1) % ring->count;
	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
	freemsg(m);
	/* release node reference */
	ieee80211_free_node(in);

	ic->ic_stats.is_tx_bytes += len;
	ic->ic_stats.is_tx_frags++;

	/* arm the tx watchdog (decremented by iwk_thread()) */
	if (sc->sc_tx_timer == 0)
		sc->sc_tx_timer = 10;
exit:
	return (err);
}
2511 
2512 static void
2513 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2514 {
2515 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2516 	ieee80211com_t	*ic = &sc->sc_ic;
2517 	int		err;
2518 
2519 	err = ieee80211_ioctl(ic, wq, mp);
2520 	if (err == ENETRESET) {
2521 		/*
2522 		 * This is special for the hidden AP connection.
2523 		 * In any case, we should make sure only one 'scan'
2524 		 * in the driver for a 'connect' CLI command. So
2525 		 * when connecting to a hidden AP, the scan is just
2526 		 * sent out to the air when we know the desired
2527 		 * essid of the AP we want to connect.
2528 		 */
2529 		if (ic->ic_des_esslen) {
2530 			(void) ieee80211_new_state(ic,
2531 			    IEEE80211_S_SCAN, -1);
2532 		}
2533 	}
2534 }
2535 
2536 /*ARGSUSED*/
2537 static int
2538 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
2539 {
2540 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2541 	ieee80211com_t	*ic = &sc->sc_ic;
2542 	ieee80211_node_t *in = ic->ic_bss;
2543 	struct ieee80211_rateset *rs = &in->in_rates;
2544 
2545 	mutex_enter(&sc->sc_glock);
2546 	switch (stat) {
2547 	case MAC_STAT_IFSPEED:
2548 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
2549 		    (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL)
2550 		    : ic->ic_fixed_rate) /2 * 1000000;
2551 		break;
2552 	case MAC_STAT_NOXMTBUF:
2553 		*val = sc->sc_tx_nobuf;
2554 		break;
2555 	case MAC_STAT_NORCVBUF:
2556 		*val = sc->sc_rx_nobuf;
2557 		break;
2558 	case MAC_STAT_IERRORS:
2559 		*val = sc->sc_rx_err;
2560 		break;
2561 	case MAC_STAT_RBYTES:
2562 		*val = ic->ic_stats.is_rx_bytes;
2563 		break;
2564 	case MAC_STAT_IPACKETS:
2565 		*val = ic->ic_stats.is_rx_frags;
2566 		break;
2567 	case MAC_STAT_OBYTES:
2568 		*val = ic->ic_stats.is_tx_bytes;
2569 		break;
2570 	case MAC_STAT_OPACKETS:
2571 		*val = ic->ic_stats.is_tx_frags;
2572 		break;
2573 	case MAC_STAT_OERRORS:
2574 	case WIFI_STAT_TX_FAILED:
2575 		*val = sc->sc_tx_err;
2576 		break;
2577 	case WIFI_STAT_TX_RETRANS:
2578 		*val = sc->sc_tx_retries;
2579 		break;
2580 	case WIFI_STAT_FCS_ERRORS:
2581 	case WIFI_STAT_WEP_ERRORS:
2582 	case WIFI_STAT_TX_FRAGS:
2583 	case WIFI_STAT_MCAST_TX:
2584 	case WIFI_STAT_RTS_SUCCESS:
2585 	case WIFI_STAT_RTS_FAILURE:
2586 	case WIFI_STAT_ACK_FAILURE:
2587 	case WIFI_STAT_RX_FRAGS:
2588 	case WIFI_STAT_MCAST_RX:
2589 	case WIFI_STAT_RX_DUPS:
2590 		mutex_exit(&sc->sc_glock);
2591 		return (ieee80211_stat(ic, stat, val));
2592 	default:
2593 		mutex_exit(&sc->sc_glock);
2594 		return (ENOTSUP);
2595 	}
2596 	mutex_exit(&sc->sc_glock);
2597 
2598 	return (IWK_SUCCESS);
2599 
2600 }
2601 
2602 static int
2603 iwk_m_start(void *arg)
2604 {
2605 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2606 	ieee80211com_t	*ic = &sc->sc_ic;
2607 	int err;
2608 
2609 	err = iwk_init(sc);
2610 
2611 	if (err != IWK_SUCCESS) {
2612 		/*
2613 		 * The hw init err(eg. RF is OFF). Return Success to make
2614 		 * the 'plumb' succeed. The iwk_thread() tries to re-init
2615 		 * background.
2616 		 */
2617 		mutex_enter(&sc->sc_glock);
2618 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2619 		mutex_exit(&sc->sc_glock);
2620 		return (IWK_SUCCESS);
2621 	}
2622 
2623 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2624 
2625 	mutex_enter(&sc->sc_glock);
2626 	sc->sc_flags |= IWK_F_RUNNING;
2627 	mutex_exit(&sc->sc_glock);
2628 
2629 	return (IWK_SUCCESS);
2630 }
2631 
2632 static void
2633 iwk_m_stop(void *arg)
2634 {
2635 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2636 	ieee80211com_t	*ic = &sc->sc_ic;
2637 
2638 	iwk_stop(sc);
2639 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2640 	mutex_enter(&sc->sc_mt_lock);
2641 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2642 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
2643 	mutex_exit(&sc->sc_mt_lock);
2644 	mutex_enter(&sc->sc_glock);
2645 	sc->sc_flags &= ~IWK_F_RUNNING;
2646 	mutex_exit(&sc->sc_glock);
2647 }
2648 
2649 /*ARGSUSED*/
2650 static int
2651 iwk_m_unicst(void *arg, const uint8_t *macaddr)
2652 {
2653 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2654 	ieee80211com_t	*ic = &sc->sc_ic;
2655 	int err;
2656 
2657 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
2658 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
2659 		mutex_enter(&sc->sc_glock);
2660 		err = iwk_config(sc);
2661 		mutex_exit(&sc->sc_glock);
2662 		if (err != IWK_SUCCESS) {
2663 			cmn_err(CE_WARN,
2664 			    "iwk_m_unicst(): "
2665 			    "failed to configure device\n");
2666 			goto fail;
2667 		}
2668 	}
2669 	return (IWK_SUCCESS);
2670 fail:
2671 	return (err);
2672 }
2673 
/*
 * GLD multicast entry point.  The hardware needs no per-address
 * multicast programming, so this is a no-op that always succeeds.
 */
/*ARGSUSED*/
static int
iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
{
	return (IWK_SUCCESS);
}
2680 
/*
 * GLD promiscuous-mode entry point.  Promiscuous mode is not
 * implemented; the request is accepted and ignored.
 */
/*ARGSUSED*/
static int
iwk_m_promisc(void *arg, boolean_t on)
{
	return (IWK_SUCCESS);
}
2687 
/*
 * Driver monitoring thread.  Loops (every ~100ms) while
 * sc_mf_thread_switch is set and performs three duties:
 *   - mirrors the hardware RF-kill switch into IWK_F_RADIO_OFF,
 *     idling while suspended or radio-off;
 *   - recovers from fatal hardware/firmware errors by re-initializing
 *     the device (with backoff, up to 20 attempts);
 *   - drives software rate control (iwk_amrr_timeout) and the tx
 *     watchdog (sc_tx_timer, decremented roughly once per second).
 * Signals sc_mt_cv on exit so the detach path can wait for it.
 */
static void
iwk_thread(iwk_sc_t *sc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
	clock_t clk;
	int times = 0, err, n = 0, timeout = 0;
	uint32_t tmp;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread_switch) {
		/* poll the hardware RF-kill switch state */
		tmp = IWK_READ(sc, CSR_GP_CNTRL);
		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
			sc->sc_flags &= ~IWK_F_RADIO_OFF;
		} else {
			sc->sc_flags |= IWK_F_RADIO_OFF;
		}
		/*
		 * If in SUSPEND or the RF is OFF, do nothing
		 */
		if ((sc->sc_flags & IWK_F_SUSPEND) ||
		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(100000));
			mutex_enter(&sc->sc_mt_lock);
			continue;
		}

		/*
		 * recovery fatal error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {

			IWK_DBG((IWK_DEBUG_FW,
			    "iwk_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwk_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

			/* backoff grows by 500ms per failed attempt */
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(2000000 + n*500000));
			mutex_enter(&sc->sc_mt_lock);
			err = iwk_init(sc);
			if (err != IWK_SUCCESS) {
				n++;
				if (n < 20)
					continue;
			}
			n = 0;
			if (!err)
				sc->sc_flags |= IWK_F_RUNNING;
			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(2000000));
			/* rejoin unless we were idle before the error */
			if (sc->sc_ostate != IEEE80211_S_INIT)
				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
			mutex_enter(&sc->sc_mt_lock);
		}

		/*
		 * rate ctl
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
			clk = ddi_get_lbolt();
			/* run AMRR at most every 500ms */
			if (clk > sc->sc_clk + drv_usectohz(500000)) {
				iwk_amrr_timeout(sc);
			}
		}

		mutex_exit(&sc->sc_mt_lock);
		delay(drv_usectohz(100000));
		mutex_enter(&sc->sc_mt_lock);

		/* tx watchdog: sc_tx_timer counts seconds (10 x 100ms) */
		if (sc->sc_tx_timer) {
			timeout++;
			if (timeout == 10) {
				sc->sc_tx_timer--;
				if (sc->sc_tx_timer == 0) {
					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
					sc->sc_ostate = IEEE80211_S_RUN;
					IWK_DBG((IWK_DEBUG_FW,
					    "iwk_thread(): try to recover from"
					    " 'send fail\n"));
				}
				timeout = 0;
			}
		}

	}
	/* let iwk_detach()/waiters know the thread is gone */
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}
2783 
2784 
2785 /*
2786  * Send a command to the firmware.
2787  */
/*
 * Queue a host command of `code' with `size' bytes of payload from `buf'
 * on the command ring and kick the write pointer.  Caller must hold
 * sc_glock.  When `async' is zero, waits up to 2 seconds for the
 * command-done notification (IWK_F_CMD_DONE, signalled via sc_cmd_cv)
 * and returns IWK_FAIL on timeout.
 */
static int
iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
	iwk_tx_desc_t *desc;
	iwk_cmd_t *cmd;

	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	/* single segment: the command header + payload */
	desc->val0 = LE_32(1 << 24);
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	/* kick cmd ring XXX */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWK_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
		    .tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async)
		return (IWK_SUCCESS);
	else {
		clock_t clk;
		sc->sc_flags &= ~IWK_F_CMD_DONE;
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		/* cv_timedwait returns -1 on timeout */
		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk)
			    < 0)
				break;
		}
		if (sc->sc_flags & IWK_F_CMD_DONE)
			return (IWK_SUCCESS);
		else
			return (IWK_FAIL);
	}
}
2841 
2842 static void
2843 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
2844 {
2845 	iwk_led_cmd_t led;
2846 
2847 	led.interval = LE_32(100000);	/* unit: 100ms */
2848 	led.id = id;
2849 	led.off = off;
2850 	led.on = on;
2851 
2852 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
2853 }
2854 
2855 static int
2856 iwk_hw_set_before_auth(iwk_sc_t *sc)
2857 {
2858 	ieee80211com_t *ic = &sc->sc_ic;
2859 	ieee80211_node_t *in = ic->ic_bss;
2860 	iwk_tx_power_table_cmd_t txpower;
2861 	iwk_add_sta_t node;
2862 	iwk_link_quality_cmd_t link_quality;
2863 	struct ieee80211_rateset rs;
2864 	uint16_t masks = 0, rate;
2865 	int i, err;
2866 
2867 	/* update adapter's configuration according the info of target AP */
2868 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
2869 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
2870 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
2871 		sc->sc_config.cck_basic_rates  = 0x03;
2872 		sc->sc_config.ofdm_basic_rates = 0;
2873 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
2874 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
2875 		sc->sc_config.cck_basic_rates  = 0;
2876 		sc->sc_config.ofdm_basic_rates = 0x15;
2877 	} else { /* assume 802.11b/g */
2878 		sc->sc_config.cck_basic_rates  = 0x0f;
2879 		sc->sc_config.ofdm_basic_rates = 0xff;
2880 	}
2881 
2882 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
2883 	    RXON_FLG_SHORT_SLOT_MSK);
2884 
2885 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
2886 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
2887 	else
2888 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
2889 
2890 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
2891 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
2892 	else
2893 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
2894 
2895 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
2896 	    "filter_flags %x  cck %x ofdm %x"
2897 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
2898 	    sc->sc_config.chan, sc->sc_config.flags,
2899 	    sc->sc_config.filter_flags,
2900 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
2901 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
2902 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
2903 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
2904 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
2905 	    sizeof (iwk_rxon_cmd_t), 1);
2906 	if (err != IWK_SUCCESS) {
2907 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2908 		    " failed to config chan%d\n",
2909 		    sc->sc_config.chan);
2910 		return (err);
2911 	}
2912 
2913 	/*
2914 	 * set Tx power for 2.4GHz channels
2915 	 * (need further investigation. fix tx power at present)
2916 	 */
2917 	(void) memset(&txpower, 0, sizeof (txpower));
2918 	txpower.band = 1; /* for 2.4G */
2919 	txpower.channel = sc->sc_config.chan;
2920 	txpower.channel_normal_width = 0;
2921 	for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
2922 		txpower.tx_power.ht_ofdm_power[i].s
2923 		    .ramon_tx_gain = 0x3f3f;
2924 		txpower.tx_power.ht_ofdm_power[i].s
2925 		    .dsp_predis_atten = 110 | (110 << 8);
2926 	}
2927 	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2928 	    s.ramon_tx_gain = 0x3f3f;
2929 	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2930 	    s.dsp_predis_atten = 110 | (110 << 8);
2931 	err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
2932 	    sizeof (txpower), 1);
2933 	if (err != IWK_SUCCESS) {
2934 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2935 		    " failed to set txpower\n");
2936 		return (err);
2937 	}
2938 
2939 	/* add default AP node */
2940 	(void) memset(&node, 0, sizeof (node));
2941 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
2942 	node.id = IWK_AP_ID;
2943 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
2944 	if (err != IWK_SUCCESS) {
2945 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2946 		    " failed to add BSS node\n");
2947 		return (err);
2948 	}
2949 
2950 	/* TX_LINK_QUALITY cmd ? */
2951 	(void) memset(&link_quality, 0, sizeof (link_quality));
2952 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
2953 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2954 		if (i < rs.ir_nrates)
2955 			rate = rs.ir_rates[rs.ir_nrates - i];
2956 		else
2957 			rate = 2;
2958 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2959 			masks |= RATE_MCS_CCK_MSK;
2960 		masks |= RATE_MCS_ANT_B_MSK;
2961 		masks &= ~RATE_MCS_ANT_A_MSK;
2962 		link_quality.rate_n_flags[i] =
2963 		    iwk_rate_to_plcp(rate) | masks;
2964 	}
2965 
2966 	link_quality.general_params.single_stream_ant_msk = 2;
2967 	link_quality.general_params.dual_stream_ant_msk = 3;
2968 	link_quality.agg_params.agg_dis_start_th = 3;
2969 	link_quality.agg_params.agg_time_limit = LE_16(4000);
2970 	link_quality.sta_id = IWK_AP_ID;
2971 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
2972 	    sizeof (link_quality), 1);
2973 	if (err != IWK_SUCCESS) {
2974 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
2975 		    "failed to config link quality table\n");
2976 		return (err);
2977 	}
2978 
2979 	return (IWK_SUCCESS);
2980 }
2981 
2982 /*
2983  * Send a scan request(assembly scan cmd) to the firmware.
2984  */
/*
 * Assemble and queue a REPLY_SCAN_CMD on the command ring: a scan
 * header (optionally with a directed SSID), a broadcast probe request
 * frame, and per-channel attributes for 2.4GHz channels 1-11.  The
 * command is sent asynchronously; always returns IWK_SUCCESS.
 */
static int
iwk_scan(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
	iwk_tx_desc_t *desc;
	iwk_tx_data_t *data;
	iwk_cmd_t *cmd;
	iwk_scan_hdr_t *hdr;
	iwk_scan_chan_t *chan;
	struct ieee80211_frame *wh;
	ieee80211_node_t *in = ic->ic_bss;
	struct ieee80211_rateset *rs;
	enum ieee80211_phymode mode;
	uint8_t *frm;
	int i, pktlen, nrates;

	/* build the (oversize) scan command directly in the DMA buffer */
	data = &ring->data[ring->cur];
	desc = data->desc;
	cmd = (iwk_cmd_t *)data->dma_data.mem_va;

	cmd->hdr.type = REPLY_SCAN_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur | 0x40;

	hdr = (iwk_scan_hdr_t *)cmd->data;
	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
	/* scan 2.4GHz channels 1-11 */
	hdr->nchan = 11;
	hdr->quiet_time = LE_16(5);
	hdr->quiet_plcp_th = LE_16(1);

	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	/* probe requests go out at 1M CCK on antenna B */
	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
	hdr->tx_cmd.tx_flags |= (0x200);
	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
	hdr->tx_cmd.rate.r.rate_n_flags |=
	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
	hdr->direct_scan[0].len = ic->ic_des_esslen;
	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;

	/* directed scan if a desired essid is set (hidden AP case) */
	if (ic->ic_des_esslen)
		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
		    ic->ic_des_esslen);
	else
		bzero(hdr->direct_scan[0].ssid,
		    sizeof (hdr->direct_scan[0].ssid));
	/*
	 * a probe request frame is required after the REPLY_SCAN_CMD
	 */
	wh = (struct ieee80211_frame *)(hdr + 1);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	(void) memset(wh->i_addr1, 0xff, 6);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
	(void) memset(wh->i_addr3, 0xff, 6);
	*(uint16_t *)&wh->i_dur[0] = 0;
	*(uint16_t *)&wh->i_seq[0] = 0;

	frm = (uint8_t *)(wh + 1);

	/* essid IE */
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = in->in_esslen;
	(void) memcpy(frm, in->in_essid, in->in_esslen);
	frm += in->in_esslen;

	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
	rs = &ic->ic_sup_rates[mode];

	/* supported rates IE */
	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = rs->ir_nrates;
	if (nrates > IEEE80211_RATE_SIZE)
		nrates = IEEE80211_RATE_SIZE;
	*frm++ = (uint8_t)nrates;
	(void) memcpy(frm, rs->ir_rates, nrates);
	frm += nrates;

	/* supported xrates IE */
	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
		*frm++ = IEEE80211_ELEMID_XRATES;
		*frm++ = (uint8_t)nrates;
		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
		frm += nrates;
	}

	/* optional IE (usually for wpa) */
	if (ic->ic_opt_ie != NULL) {
		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
		frm += ic->ic_opt_ie_len;
	}

	/* setup length of probe request */
	hdr->tx_cmd.len = LE_16(frm - (uint8_t *)wh);
	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);

	/*
	 * the attribute of the scan channels are required after the probe
	 * request frame.
	 */
	chan = (iwk_scan_chan_t *)frm;
	for (i = 1; i <= hdr->nchan; i++, chan++) {
		chan->type = 3;
		chan->chan = (uint8_t)i;
		chan->tpc.tx_gain = 0x3f;
		chan->tpc.dsp_atten = 110;
		chan->active_dwell = LE_16(20);
		chan->passive_dwell = LE_16(120);

		frm += sizeof (iwk_scan_chan_t);
	}

	pktlen = frm - (uint8_t *)cmd;

	(void) memset(desc, 0, sizeof (*desc));
	desc->val0 = LE_32(1 << 24);
	desc->pa[0].tb1_addr =
	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;

	/*
	 * maybe for cmd, filling the byte cnt table is not necessary.
	 * anyway, we fill it here.
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
	    .tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWK_MAX_WIN_SIZE) {
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
		    .tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
	}

	/* kick cmd ring */
	ring->cur = (ring->cur + 1) % ring->count;
	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	return (IWK_SUCCESS);
}
3133 
/*
 * Configure the hardware once the runtime firmware is up: power
 * management, bluetooth coexistence, the RXON (operating-state/filter)
 * command, a fixed 2.4GHz TX power table, the broadcast station entry
 * and a default link-quality (retry rate) table.
 *
 * Returns IWK_SUCCESS, or the error from the first iwk_cmd() that fails.
 */
static int
iwk_config(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_power_table_cmd_t txpower;
	iwk_powertable_cmd_t powertable;
	iwk_bt_cmd_t bt;
	iwk_add_sta_t node;
	iwk_link_quality_cmd_t link_quality;
	int i, err;
	uint16_t masks = 0;

	/*
	 * set power mode. Disable power management at present, do it later
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
		return (err);
	}

	/* configure bt coexistence */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/*
	 * configure rxon: describes the current channel, operating mode
	 * and receive filters to the firmware.
	 */
	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK | RXON_FLG_AUTO_DETECT_MSK
	    | RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
	/* filter flags and device type depend on the 802.11 opmode */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}
	sc->sc_config.cck_basic_rates  = 0x0f;
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;

	/* set antenna */

	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwk_rxon_cmd_t), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * set Tx power for 2.4GHz channels
	 * (need further investigation. fix tx power at present)
	 */
	(void) memset(&txpower, 0, sizeof (txpower));
	txpower.band = 1; /* for 2.4G */
	txpower.channel = sc->sc_config.chan;
	txpower.channel_normal_width = 0;
	/* same fixed gain/attenuation for every HT/OFDM entry */
	for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
		txpower.tx_power.ht_ofdm_power[i]
		    .s.ramon_tx_gain = 0x3f3f;
		txpower.tx_power.ht_ofdm_power[i]
		    .s.dsp_predis_atten = 110 | (110 << 8);
	}
	/* one extra (legacy CCK) entry follows the HT/OFDM entries */
	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
	    .s.ramon_tx_gain = 0x3f3f;
	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
	    .s.dsp_predis_atten = 110 | (110 << 8);
	err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
	    sizeof (txpower), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): failed to set txpower\n");
		return (err);
	}

	/* add broadcast node so that we can send broadcast frame */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.bssid, 0xff, 6);
	node.id = IWK_BROADCAST_ID;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	/*
	 * TX_LINK_QUALITY cmd: every retry step uses the same CCK rate
	 * on antenna B (rate code 2, i.e. 1Mb/s in 500kb/s units).
	 */
	(void) memset(&link_quality, 0, sizeof (link_quality));
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		masks |= RATE_MCS_CCK_MSK;
		masks |= RATE_MCS_ANT_B_MSK;
		masks &= ~RATE_MCS_ANT_A_MSK;
		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
	}

	link_quality.general_params.single_stream_ant_msk = 2;
	link_quality.general_params.dual_stream_ant_msk = 3;
	link_quality.agg_params.agg_dis_start_th = 3;
	link_quality.agg_params.agg_time_limit = LE_16(4000);
	link_quality.sta_id = IWK_BROADCAST_ID;
	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
	    sizeof (link_quality), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to config link quality table\n");
		return (err);
	}

	return (IWK_SUCCESS);
}
3281 
/*
 * Ask the NIC's DMA master to stop and wait for the MASTER_DISABLED
 * acknowledgement (polled up to 2000 times at 1ms intervals, ~2s).
 * If the MAC is in power-save the stop completes implicitly and no
 * polling is performed.
 */
static void
iwk_stop_master(iwk_sc_t *sc)
{
	uint32_t tmp;
	int n;

	/* request master stop */
	tmp = IWK_READ(sc, CSR_RESET);
	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);

	/* no need to wait when the MAC is already powered down */
	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
		return;

	for (n = 0; n < 2000; n++) {
		if (IWK_READ(sc, CSR_RESET) &
		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
			break;
		DELAY(1000);
	}
	/* timeout is only logged; callers proceed regardless */
	if (n == 2000)
		IWK_DBG((IWK_DEBUG_HW,
		    "timeout waiting for master stop\n"));
}
3306 
3307 static int
3308 iwk_power_up(iwk_sc_t *sc)
3309 {
3310 	uint32_t tmp;
3311 
3312 	iwk_mac_access_enter(sc);
3313 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3314 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3315 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3316 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3317 	iwk_mac_access_exit(sc);
3318 
3319 	DELAY(5000);
3320 	return (IWK_SUCCESS);
3321 }
3322 
/*
 * Low-level hardware bring-up performed before firmware load: clear
 * pending interrupts, enable the MAC clock and DMA/BSM clocks, power
 * the adapter from VMAIN and apply PCI configuration workarounds.
 *
 * Returns IWK_SUCCESS, or ETIMEDOUT if the MAC clock never becomes
 * ready.
 */
static int
iwk_preinit(iwk_sc_t *sc)
{
	uint32_t tmp;
	int n;
	uint8_t vlink;

	/* clear any pending interrupts */
	IWK_WRITE(sc, CSR_INT, 0xffffffff);

	/* disable the L0s exit timer (chicken bit) */
	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock ready (up to 1000 x 10us = 10ms) */
	for (n = 0; n < 1000; n++) {
		if (IWK_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
			break;
		DELAY(10);
	}
	if (n == 1000) {
		return (ETIMEDOUT);
	}
	/* request DMA and BSM clocks */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);

	DELAY(20);
	/* disable L1-Active while the device is in use */
	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwk_mac_access_exit(sc);

	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */

	(void) iwk_power_up(sc);

	/*
	 * NOTE(review): clears bit 11 at PCI config offset 0xe8 on early
	 * hardware revisions (< 8) -- presumably a PCIe errata
	 * workaround; confirm against the hardware programming guide.
	 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}


	/* clear bit 1 at PCI config offset 0xf0 (link-related tweak) */
	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	/* tell the hardware which silicon/board features are present */
	tmp = IWK_READ(sc, CSR_SW_VER);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
	IWK_WRITE(sc, CSR_SW_VER, tmp);

	/* make sure power supply on each part of the hardware */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);
	/* pulse the reset request: set, wait 5us, clear */
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwk_mac_access_exit(sc);
	return (IWK_SUCCESS);
}
3396 
3397 /*
3398  * set up semphore flag to own EEPROM
3399  */
3400 static int iwk_eep_sem_down(iwk_sc_t *sc)
3401 {
3402 	int count1, count2;
3403 	uint32_t tmp;
3404 
3405 	for (count1 = 0; count1 < 1000; count1++) {
3406 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3407 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3408 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3409 
3410 		for (count2 = 0; count2 < 2; count2++) {
3411 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3412 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
3413 				return (IWK_SUCCESS);
3414 			DELAY(10000);
3415 		}
3416 	}
3417 	return (IWK_FAIL);
3418 }
3419 
3420 /*
3421  * reset semphore flag to release EEPROM
3422  */
3423 static void iwk_eep_sem_up(iwk_sc_t *sc)
3424 {
3425 	uint32_t tmp;
3426 
3427 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3428 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3429 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3430 }
3431 
3432 /*
3433  * This function load all infomation in eeprom into iwk_eep
3434  * structure in iwk_sc_t structure
3435  */
static int iwk_eep_load(iwk_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	/* the EEPROM is read as 16-bit words into the in-core map */
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/* read eeprom gp register in CSR */
	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWK_DBG((IWK_DEBUG_EEPROM, "not find eeprom\n"));
		return (IWK_FAIL);
	}

	/* take the EEPROM semaphore before touching the read register */
	rr = iwk_eep_sem_down(sc);
	if (rr != 0) {
		IWK_DBG((IWK_DEBUG_EEPROM, "driver failed to own EEPROM\n"));
		return (IWK_FAIL);
	}

	for (addr = 0; addr < eep_sz; addr += 2) {
		/* latch the byte address of the word to read */
		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		/*
		 * NOTE(review): reads back the register and clears bit 1;
		 * presumably this strobes the read command -- confirm
		 * against the CSR_EEPROM_REG description.
		 */
		tmp = IWK_READ(sc, CSR_EEPROM_REG);
		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		/* wait up to 10 x 10us for the read-valid bit (bit 0) */
		for (i = 0; i < 10; i++) {
			rv = IWK_READ(sc, CSR_EEPROM_REG);
			if (rv & 1)
				break;
			DELAY(10);
		}

		if (!(rv & 1)) {
			IWK_DBG((IWK_DEBUG_EEPROM,
			    "time out when read eeprome\n"));
			iwk_eep_sem_up(sc);
			return (IWK_FAIL);
		}

		/* the data word is returned in the upper 16 bits */
		eep_p[addr/2] = rv >> 16;
	}

	iwk_eep_sem_up(sc);
	return (IWK_SUCCESS);
}
3482 
3483 /*
3484  * init mac address in ieee80211com_t struct
3485  */
3486 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3487 {
3488 	ieee80211com_t *ic = &sc->sc_ic;
3489 	struct iwk_eep *ep = &sc->sc_eep_map;
3490 
3491 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3492 
3493 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
3494 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
3495 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
3496 }
3497 
/*
 * Bring the hardware fully up: preinit, program the Rx/Tx DMA rings,
 * enable interrupts, load and start the firmware, wait for the
 * firmware-initialized signal, then push the runtime configuration
 * via iwk_config().
 *
 * Returns IWK_SUCCESS, or IWK_FAIL on any failure (radio off,
 * firmware load/init timeout, configuration error).
 */
static int
iwk_init(iwk_sc_t *sc)
{
	int qid, n, err;
	clock_t clk;
	uint32_t tmp;

	mutex_enter(&sc->sc_glock);
	sc->sc_flags &= ~IWK_F_FW_INIT;

	(void) iwk_preinit(sc);

	/* abort if the hardware RF-kill switch is engaged */
	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
		goto fail1;
	}

	/* init Rx ring */
	iwk_mac_access_enter(sc);
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* program the Rx buffer-descriptor ring base address */
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* status write-back area lives in the shared DMA page */
	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwk_shared, val0)) >> 4));

	/* enable the Rx DMA channel: 4K buffers, interrupt the host */
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwk_mac_access_exit(sc);
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/* init Tx rings */
	iwk_mac_access_enter(sc);
	iwk_reg_write(sc, SCD_TXFACT, 0);

	/* keep warm page */
	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	/* point each Tx DMA channel at its descriptor ring and enable it */
	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}
	iwk_mac_access_exit(sc);

	/* clear "radio off" and "disable command" bits */
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear any pending interrupts */
	IWK_WRITE(sc, CSR_INT, 0xffffffff);

	/* enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/* NOTE(review): RFKILL was already cleared above; these two
	 * writes look redundant -- confirm before removing. */
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* try up to twice to load and start the firmware */
	for (n = 0; n < 2; n++) {
		/* load firmware init segment into NIC */
		err = iwk_load_firmware(sc);
		if (err != IWK_SUCCESS) {
			cmn_err(CE_WARN, "iwk_init(): "
			    "failed to setup boot firmware\n");
			continue;
		}

		/* now press "execute" start running */
		IWK_WRITE(sc, CSR_RESET, 0);
		break;
	}
	if (n == 2) {
		cmn_err(CE_WARN, "iwk_init(): " "failed to load firmware\n");
		goto fail1;
	}
	/* ..and wait at most two seconds (2000000us) for adapter init */
	clk = ddi_get_lbolt() + drv_usectohz(2000000);
	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
			break;
	}
	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
		cmn_err(CE_WARN,
		    "iwk_init(): timeout waiting for firmware init\n");
		goto fail1;
	}

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwk_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
		goto fail1;
	}

	/* at this point, hardware may receive beacons :) */
	mutex_exit(&sc->sc_glock);
	return (IWK_SUCCESS);

fail1:
	err = IWK_FAIL;
	mutex_exit(&sc->sc_glock);
	return (err);
}
3624 
/*
 * Shut the hardware down: reset the NIC, mask and acknowledge
 * interrupts, drain all Tx rings and the Rx ring, stop the DMA
 * master, then issue a software reset.
 */
static void
iwk_stop(iwk_sc_t *sc)
{
	uint32_t tmp;
	int i;


	mutex_enter(&sc->sc_glock);

	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	/* acknowledge any interrupts still pending */
	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/* reset all Tx rings */
	for (i = 0; i < IWK_NUM_QUEUES; i++)
		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);

	/* reset Rx ring */
	iwk_reset_rx_ring(sc);

	/* stop the DMA clock */
	iwk_mac_access_enter(sc);
	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwk_mac_access_exit(sc);

	DELAY(5);

	iwk_stop_master(sc);

	/* finally, software-reset the whole device */
	sc->sc_tx_timer = 0;
	tmp = IWK_READ(sc, CSR_RESET);
	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
	mutex_exit(&sc->sc_glock);
}
3660 
3661 /*
3662  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
3663  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
3664  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
3665  * INRIA Sophia - Projet Planete
3666  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
3667  */
/* fewer than 10% of recent transmits needed a retry: rate is working */
#define	is_success(amrr)	\
	((amrr)->retrycnt < (amrr)->txcnt / 10)
/* more than a third of recent transmits needed a retry: rate too high */
#define	is_failure(amrr)	\
	((amrr)->retrycnt > (amrr)->txcnt / 3)
/* enough samples collected to make a rate decision */
#define	is_enough(amrr)		\
	((amrr)->txcnt > 100)
/* already at the lowest tx rate index */
#define	is_min_rate(in)		\
	((in)->in_txrate == 0)
/* already at the highest rate in the node's rate set */
#define	is_max_rate(in)		\
	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
#define	increase_rate(in)	\
	((in)->in_txrate++)
#define	decrease_rate(in)	\
	((in)->in_txrate--)
/* restart the tx/retry counters for the next measurement window */
#define	reset_cnt(amrr)		\
	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }

/* bounds for the adaptive success threshold used by the algorithm */
#define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
#define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
3687 
3688 static void
3689 iwk_amrr_init(iwk_amrr_t *amrr)
3690 {
3691 	amrr->success = 0;
3692 	amrr->recovery = 0;
3693 	amrr->txcnt = amrr->retrycnt = 0;
3694 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3695 }
3696 
/*
 * Periodic rate-control pass: apply the AMRR algorithm to the bss
 * node in STA mode, or to every known node otherwise, and record the
 * time of this pass in sc_clk.
 */
static void
iwk_amrr_timeout(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;

	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
	if (ic->ic_opmode == IEEE80211_M_STA)
		iwk_amrr_ratectl(NULL, ic->ic_bss);
	else
		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
	sc->sc_clk = ddi_get_lbolt();
}
3709 
/*
 * Apply the AMRR decision to one node: step the tx rate up after a
 * sustained run of low-retry windows, step it down when retries
 * exceed a third of transmits, and adapt the success threshold while
 * recovering from a failed rate increase.
 */
/* ARGSUSED */
static void
iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
{
	/*
	 * NOTE(review): the rate-control state is obtained by casting
	 * the node pointer, which assumes iwk_amrr_t overlays the start
	 * of the node structure -- confirm against the node allocation.
	 */
	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
	int need_change = 0;

	if (is_success(amrr) && is_enough(amrr)) {
		amrr->success++;
		/* enough consecutive good windows: try the next rate up */
		if (amrr->success >= amrr->success_threshold &&
		    !is_max_rate(in)) {
			amrr->recovery = 1;
			amrr->success = 0;
			increase_rate(in);
			IWK_DBG((IWK_DEBUG_RATECTL,
			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
			need_change = 1;
		} else {
			amrr->recovery = 0;
		}
	} else if (is_failure(amrr)) {
		amrr->success = 0;
		if (!is_min_rate(in)) {
			/*
			 * failing right after an increase: back off and
			 * demand a longer run of successes next time
			 */
			if (amrr->recovery) {
				amrr->success_threshold++;
				if (amrr->success_threshold >
				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
					amrr->success_threshold =
					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
			} else {
				amrr->success_threshold =
				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
			}
			decrease_rate(in);
			IWK_DBG((IWK_DEBUG_RATECTL,
			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
			need_change = 1;
		}
		amrr->recovery = 0;	/* paper is incorrect */
	}

	/* start a fresh measurement window after any decision */
	if (is_enough(amrr) || need_change)
		reset_cnt(amrr);
}
3756