xref: /titanic_52/usr/src/uts/common/io/iwk/iwk2.c (revision 55553f719b521a0bb4deab6efc944cd30c1a56aa)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 /*
32  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/byteorder.h>
37 #include <sys/conf.h>
38 #include <sys/cmn_err.h>
39 #include <sys/stat.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/strsubr.h>
43 #include <sys/ethernet.h>
44 #include <inet/common.h>
45 #include <inet/nd.h>
46 #include <inet/mi.h>
47 #include <sys/note.h>
48 #include <sys/stream.h>
49 #include <sys/strsun.h>
50 #include <sys/modctl.h>
51 #include <sys/devops.h>
52 #include <sys/dlpi.h>
53 #include <sys/mac.h>
54 #include <sys/mac_wifi.h>
55 #include <sys/net80211.h>
56 #include <sys/net80211_proto.h>
57 #include <sys/varargs.h>
58 #include <sys/policy.h>
59 #include <sys/pci.h>
60 
61 #include "iwk_hw.h"
62 #include "iwk_eeprom.h"
63 #include "iwk2_var.h"
64 #include <inet/wifi_ioctl.h>
65 
#ifdef DEBUG
/*
 * Debug message categories.  Enable a category at runtime by OR-ing
 * its bit into iwk_dbg_flags (e.g. with mdb).
 */
#define	IWK_DEBUG_80211		(1 << 0)
#define	IWK_DEBUG_CMD		(1 << 1)
#define	IWK_DEBUG_DMA		(1 << 2)
#define	IWK_DEBUG_EEPROM	(1 << 3)
#define	IWK_DEBUG_FW		(1 << 4)
#define	IWK_DEBUG_HW		(1 << 5)
#define	IWK_DEBUG_INTR		(1 << 6)
#define	IWK_DEBUG_MRR		(1 << 7)
#define	IWK_DEBUG_PIO		(1 << 8)
#define	IWK_DEBUG_RX		(1 << 9)
#define	IWK_DEBUG_SCAN		(1 << 10)
#define	IWK_DEBUG_TX		(1 << 11)
#define	IWK_DEBUG_RATECTL	(1 << 12)
#define	IWK_DEBUG_RADIO		(1 << 13)
#define	IWK_DEBUG_RESUME	(1 << 14)
/* mask of enabled categories; no messages by default */
uint32_t iwk_dbg_flags = 0;
/*
 * Call as IWK_DBG((category, fmt, ...)) -- double parentheses, since
 * the whole argument list is passed through as 'x'.  Compiles to
 * nothing on non-DEBUG builds.
 */
#define	IWK_DBG(x) \
	iwk_dbg x
#else
#define	IWK_DBG(x)
#endif
88 
/* anchor for per-instance soft state, managed via ddi_soft_state_*() */
static void	*iwk_soft_state_p = NULL;
/*
 * Firmware image linked into the driver at build time; the included
 * hex file expands to the raw 4965 microcode bytes.  iwk_attach()
 * later points sc_hdr at this array to parse the section sizes.
 */
static uint8_t iwk_fw_bin [] = {
#include "fw-iw/iw4965.ucode.hex"
};
93 
/* DMA attributes for a shared page (4KB-aligned, single segment) */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a keep warm DRAM descriptor (4KB-aligned) */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a ring descriptor (256-byte aligned) */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a cmd (only 4-byte alignment required) */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for a rx buffer (256-byte aligned, single segment) */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum  number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/* DMA attributes for text and data part in the firmware */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count (2GB - 1) */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};
213 
214 
/* regs access attributes: little-endian device, strictly ordered access */
static ddi_device_acc_attr_t iwk_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/* DMA access attributes: no byte swapping, strictly ordered access */
static ddi_device_acc_attr_t iwk_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t	iwk_intr(caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t	iwk_rx_softintr(caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 
285 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
286 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
287 
288 /*
289  * GLD specific operations
290  */
291 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
292 static int	iwk_m_start(void *arg);
293 static void	iwk_m_stop(void *arg);
294 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
295 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
296 static int	iwk_m_promisc(void *arg, boolean_t on);
297 static mblk_t  *iwk_m_tx(void *arg, mblk_t *mp);
298 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
299 
300 static void	iwk_destroy_locks(iwk_sc_t *sc);
301 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
302 static void	iwk_thread(iwk_sc_t *sc);
303 
304 /*
305  * Supported rates for 802.11b/g modes (in 500Kbps unit).
306  * 11a and 11n support will be added later.
307  */
/* 11b: 1, 2, 5.5 and 11 Mbps expressed in 500Kbps units */
static const struct ieee80211_rateset iwk_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

/* 11g: the 11b rates plus the OFDM rates 6-54 Mbps, in 500Kbps units */
static const struct ieee80211_rateset iwk_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
313 
314 /*
315  * For mfthread only
316  */
317 extern pri_t minclsyspri;
318 
319 #define	DRV_NAME_4965	"iwk"
320 
321 /*
322  * Module Loading Data & Entry Points
323  */
/* dev_ops: no identify/probe; no power entry point (nodev); MT-safe */
DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
    iwk_detach, nodev, NULL, D_MP, NULL);

/* this module is a device driver */
static struct modldrv iwk_modldrv = {
	&mod_driverops,
	"Intel(R) 4965AGN driver(N)",
	&iwk_devops
};

/* single-element linkage handed to mod_install()/mod_remove() */
static struct modlinkage iwk_modlinkage = {
	MODREV_1,
	&iwk_modldrv,
	NULL
};
338 
339 int
340 _init(void)
341 {
342 	int	status;
343 
344 	status = ddi_soft_state_init(&iwk_soft_state_p,
345 	    sizeof (iwk_sc_t), 1);
346 	if (status != DDI_SUCCESS)
347 		return (status);
348 
349 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
350 	status = mod_install(&iwk_modlinkage);
351 	if (status != DDI_SUCCESS) {
352 		mac_fini_ops(&iwk_devops);
353 		ddi_soft_state_fini(&iwk_soft_state_p);
354 	}
355 
356 	return (status);
357 }
358 
359 int
360 _fini(void)
361 {
362 	int status;
363 
364 	status = mod_remove(&iwk_modlinkage);
365 	if (status == DDI_SUCCESS) {
366 		mac_fini_ops(&iwk_devops);
367 		ddi_soft_state_fini(&iwk_soft_state_p);
368 	}
369 
370 	return (status);
371 }
372 
/* _info(9E): report module information via mod_info(9F). */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwk_modlinkage, mip));
}
378 
379 /*
380  * Mac Call Back entries
381  */
/*
 * GLDv3 callback vector.  The MC_IOCTL flag advertises that the
 * optional ioctl entry at the end of this table is implemented;
 * the entry before it (resources) is unused.
 */
mac_callbacks_t	iwk_m_callbacks = {
	MC_IOCTL,
	iwk_m_stat,		/* get statistics */
	iwk_m_start,		/* start the device */
	iwk_m_stop,		/* stop the device */
	iwk_m_promisc,		/* set promiscuous mode */
	iwk_m_multicst,		/* add/remove multicast address */
	iwk_m_unicst,		/* set unicast address */
	iwk_m_tx,		/* transmit a chain of mblks */
	NULL,			/* resources (unused) */
	iwk_m_ioctl		/* ioctl (enabled by MC_IOCTL above) */
};
394 
#ifdef DEBUG
/*
 * Debug printf: emit the message through vcmn_err(9F) only when at
 * least one of the category bits in 'flags' is enabled in the global
 * iwk_dbg_flags mask.  Normally invoked via the IWK_DBG() macro.
 */
void
iwk_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list	args;

	if ((flags & iwk_dbg_flags) == 0)
		return;

	va_start(args, fmt);
	vcmn_err(CE_NOTE, fmt, args);
	va_end(args);
}
#endif
408 
409 /*
410  * device operations
411  */
412 int
413 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
414 {
415 	iwk_sc_t		*sc;
416 	ieee80211com_t	*ic;
417 	int			instance, err, i;
418 	char			strbuf[32];
419 	wifi_data_t		wd = { 0 };
420 	mac_register_t		*macp;
421 
422 	if (cmd != DDI_ATTACH) {
423 		err = DDI_FAILURE;
424 		goto attach_fail1;
425 	}
426 
427 	instance = ddi_get_instance(dip);
428 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
429 	if (err != DDI_SUCCESS) {
430 		cmn_err(CE_WARN,
431 		    "iwk_attach(): failed to allocate soft state\n");
432 		goto attach_fail1;
433 	}
434 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
435 	sc->sc_dip = dip;
436 
437 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
438 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
439 	if (err != DDI_SUCCESS) {
440 		cmn_err(CE_WARN,
441 		    "iwk_attach(): failed to map config spaces regs\n");
442 		goto attach_fail2;
443 	}
444 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
445 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
446 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
447 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
448 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
449 	if (!sc->sc_clsz)
450 		sc->sc_clsz = 16;
451 	sc->sc_clsz = (sc->sc_clsz << 2);
452 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
453 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
454 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
455 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
456 	/*
457 	 * Map operating registers
458 	 */
459 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
460 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
461 	if (err != DDI_SUCCESS) {
462 		cmn_err(CE_WARN,
463 		    "iwk_attach(): failed to map device regs\n");
464 		goto attach_fail2a;
465 	}
466 
467 	/*
468 	 * Initialize mutexs and condvars
469 	 */
470 	err = ddi_get_iblock_cookie(dip, 0, &sc->sc_iblk);
471 	if (err != DDI_SUCCESS) {
472 		cmn_err(CE_WARN,
473 		    "iwk_attach(): failed to do ddi_get_iblock_cookie()\n");
474 		goto attach_fail2b;
475 	}
476 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER, sc->sc_iblk);
477 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER, sc->sc_iblk);
478 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
479 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
480 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
481 	/*
482 	 * initialize the mfthread
483 	 */
484 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
485 	    (void *) sc->sc_iblk);
486 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
487 	sc->sc_mf_thread = NULL;
488 	sc->sc_mf_thread_switch = 0;
489 
490 	/*
491 	 * Allocate shared page.
492 	 */
493 	err = iwk_alloc_shared(sc);
494 	if (err != DDI_SUCCESS) {
495 		cmn_err(CE_WARN, "failed to allocate shared page\n");
496 		goto attach_fail3;
497 	}
498 
499 	/*
500 	 * Allocate keep warm page.
501 	 */
502 	err = iwk_alloc_kw(sc);
503 	if (err != DDI_SUCCESS) {
504 		cmn_err(CE_WARN, "failed to allocate keep warm page\n");
505 		goto attach_fail3a;
506 	}
507 
508 	/*
509 	 * Do some necessary hardware initializations.
510 	 */
511 	err = iwk_preinit(sc);
512 	if (err != DDI_SUCCESS) {
513 		cmn_err(CE_WARN, "failed to init hardware\n");
514 		goto attach_fail4;
515 	}
516 
517 	/* initialize EEPROM */
518 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
519 	if (err != 0) {
520 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
521 		goto attach_fail4;
522 	}
523 
524 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
525 		IWK_DBG((IWK_DEBUG_EEPROM, "older EEPROM detected"));
526 		goto attach_fail4;
527 	}
528 
529 	iwk_get_mac_from_eep(sc);
530 
531 	err = iwk_ring_init(sc);
532 	if (err != DDI_SUCCESS) {
533 		cmn_err(CE_WARN, "iwk_attach(): "
534 		    "failed to allocate and initialize ring\n");
535 		goto attach_fail4;
536 	}
537 
538 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
539 
540 	err = iwk_alloc_fw_dma(sc);
541 	if (err != DDI_SUCCESS) {
542 		cmn_err(CE_WARN, "iwk_attach(): "
543 		    "failed to allocate firmware dma\n");
544 		goto attach_fail5;
545 	}
546 
547 	/*
548 	 * Initialize the wifi part, which will be used by
549 	 * generic layer
550 	 */
551 	ic = &sc->sc_ic;
552 	ic->ic_phytype  = IEEE80211_T_OFDM;
553 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
554 	ic->ic_state    = IEEE80211_S_INIT;
555 	ic->ic_maxrssi  = 100; /* experimental number */
556 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
557 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
558 	/*
559 	 * use software WEP and TKIP, hardware CCMP;
560 	 */
561 	ic->ic_caps |= IEEE80211_C_AES_CCM;
562 	/*
563 	 * Support WPA/WPA2
564 	 */
565 	ic->ic_caps |= IEEE80211_C_WPA;
566 	/* set supported .11b and .11g rates */
567 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
568 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
569 
570 	/* set supported .11b and .11g channels (1 through 14) */
571 	for (i = 1; i <= 14; i++) {
572 		ic->ic_sup_channels[i].ich_freq =
573 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
574 		ic->ic_sup_channels[i].ich_flags =
575 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
576 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
577 	}
578 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
579 	ic->ic_xmit = iwk_send;
580 	/*
581 	 * init Wifi layer
582 	 */
583 	ieee80211_attach(ic);
584 
585 	/*
586 	 * different instance has different WPA door
587 	 */
588 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
589 	    ddi_driver_name(dip),
590 	    ddi_get_instance(dip));
591 
592 	/*
593 	 * Override 80211 default routines
594 	 */
595 	sc->sc_newstate = ic->ic_newstate;
596 	ic->ic_newstate = iwk_newstate;
597 	ic->ic_node_alloc = iwk_node_alloc;
598 	ic->ic_node_free = iwk_node_free;
599 	ic->ic_crypto.cs_key_set = iwk_key_set;
600 	ieee80211_media_init(ic);
601 	/*
602 	 * initialize default tx key
603 	 */
604 	ic->ic_def_txkey = 0;
605 
606 	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW,
607 	    &sc->sc_rx_softint_id, &sc->sc_iblk, NULL, iwk_rx_softintr,
608 	    (caddr_t)sc);
609 	if (err != DDI_SUCCESS) {
610 		cmn_err(CE_WARN,
611 		    "iwk_attach(): failed to do ddi_add_softintr()\n");
612 		goto attach_fail7;
613 	}
614 
615 	/*
616 	 * Add the interrupt handler
617 	 */
618 	err = ddi_add_intr(dip, 0, &sc->sc_iblk, NULL,
619 	    iwk_intr, (caddr_t)sc);
620 	if (err != DDI_SUCCESS) {
621 		cmn_err(CE_WARN,
622 		    "iwk_attach(): failed to do ddi_add_intr()\n");
623 		goto attach_fail8;
624 	}
625 
626 	/*
627 	 * Initialize pointer to device specific functions
628 	 */
629 	wd.wd_secalloc = WIFI_SEC_NONE;
630 	wd.wd_opmode = ic->ic_opmode;
631 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
632 
633 	macp = mac_alloc(MAC_VERSION);
634 	if (err != DDI_SUCCESS) {
635 		cmn_err(CE_WARN,
636 		    "iwk_attach(): failed to do mac_alloc()\n");
637 		goto attach_fail9;
638 	}
639 
640 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
641 	macp->m_driver		= sc;
642 	macp->m_dip		= dip;
643 	macp->m_src_addr	= ic->ic_macaddr;
644 	macp->m_callbacks	= &iwk_m_callbacks;
645 	macp->m_min_sdu		= 0;
646 	macp->m_max_sdu		= IEEE80211_MTU;
647 	macp->m_pdata		= &wd;
648 	macp->m_pdata_size	= sizeof (wd);
649 
650 	/*
651 	 * Register the macp to mac
652 	 */
653 	err = mac_register(macp, &ic->ic_mach);
654 	mac_free(macp);
655 	if (err != DDI_SUCCESS) {
656 		cmn_err(CE_WARN,
657 		    "iwk_attach(): failed to do mac_register()\n");
658 		goto attach_fail9;
659 	}
660 
661 	/*
662 	 * Create minor node of type DDI_NT_NET_WIFI
663 	 */
664 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
665 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
666 	    instance + 1, DDI_NT_NET_WIFI, 0);
667 	if (err != DDI_SUCCESS)
668 		cmn_err(CE_WARN,
669 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
670 
671 	/*
672 	 * Notify link is down now
673 	 */
674 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
675 
676 	/*
677 	 * create the mf thread to handle the link status,
678 	 * recovery fatal error, etc.
679 	 */
680 
681 	sc->sc_mf_thread_switch = 1;
682 	if (sc->sc_mf_thread == NULL)
683 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
684 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
685 
686 	sc->sc_flags |= IWK_F_ATTACHED;
687 
688 	return (DDI_SUCCESS);
689 attach_fail9:
690 	ddi_remove_intr(dip, 0, sc->sc_iblk);
691 attach_fail8:
692 	ddi_remove_softintr(sc->sc_rx_softint_id);
693 	sc->sc_rx_softint_id = NULL;
694 attach_fail7:
695 	ieee80211_detach(ic);
696 attach_fail6:
697 	iwk_free_fw_dma(sc);
698 attach_fail5:
699 	iwk_ring_free(sc);
700 attach_fail4:
701 	iwk_free_kw(sc);
702 attach_fail3a:
703 	iwk_free_shared(sc);
704 attach_fail3:
705 	iwk_destroy_locks(sc);
706 attach_fail2b:
707 	ddi_regs_map_free(&sc->sc_handle);
708 attach_fail2a:
709 	ddi_regs_map_free(&sc->sc_cfg_handle);
710 attach_fail2:
711 	ddi_soft_state_free(iwk_soft_state_p, instance);
712 attach_fail1:
713 	return (err);
714 }
715 
/*
 * iwk_detach(9E): tear down an instance in roughly the reverse order
 * of iwk_attach(): stop the monitor thread, stop the hardware,
 * unregister from the MAC layer, free DMA resources, remove interrupt
 * handlers, detach net80211, destroy locks and unmap registers.
 */
int
iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwk_sc_t	*sc;
	int err;

	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (!(sc->sc_flags & IWK_F_ATTACHED))
		return (DDI_FAILURE);

	/*
	 * Destroy the mf_thread: clear the run switch and wait for the
	 * thread to notice, signal sc_mt_cv and clear sc_mf_thread.
	 * cv_wait_sig() returning 0 means we were interrupted by a
	 * signal, in which case we stop waiting.
	 */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_mf_thread_switch = 0;
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
			break;
	}
	mutex_exit(&sc->sc_mt_lock);

	iwk_stop(sc);
	DELAY(500000);	/* half-second settle time after stop -- TODO confirm */

	/*
	 * Unregister from the MAC layer subsystem; fails (and aborts the
	 * detach) if the link is still held open above us.
	 */
	err = mac_unregister(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS)
		return (err);

	/* free all DMA resources under the global lock */
	mutex_enter(&sc->sc_glock);
	iwk_free_fw_dma(sc);
	iwk_ring_free(sc);
	iwk_free_kw(sc);
	iwk_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	ddi_remove_intr(dip, 0, sc->sc_iblk);
	ddi_remove_softintr(sc->sc_rx_softint_id);
	sc->sc_rx_softint_id = NULL;

	/*
	 * detach ieee80211
	 */
	ieee80211_detach(&sc->sc_ic);

	iwk_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
777 
/*
 * Destroy every mutex and condvar created in iwk_attach(), in the
 * reverse order of their initialization.
 */
static void
iwk_destroy_locks(iwk_sc_t *sc)
{
	cv_destroy(&sc->sc_mt_cv);
	mutex_destroy(&sc->sc_mt_lock);
	cv_destroy(&sc->sc_tx_cv);
	cv_destroy(&sc->sc_cmd_cv);
	cv_destroy(&sc->sc_fw_cv);
	mutex_destroy(&sc->sc_tx_lock);
	mutex_destroy(&sc->sc_glock);
}
789 
790 /*
791  * Allocate an area of memory and a DMA handle for accessing it
792  */
793 static int
794 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
795     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
796     uint_t dma_flags, iwk_dma_t *dma_p)
797 {
798 	caddr_t vaddr;
799 	int err;
800 
801 	/*
802 	 * Allocate handle
803 	 */
804 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
805 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
806 	if (err != DDI_SUCCESS) {
807 		dma_p->dma_hdl = NULL;
808 		return (DDI_FAILURE);
809 	}
810 
811 	/*
812 	 * Allocate memory
813 	 */
814 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
815 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
816 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
817 	if (err != DDI_SUCCESS) {
818 		ddi_dma_free_handle(&dma_p->dma_hdl);
819 		dma_p->dma_hdl = NULL;
820 		dma_p->acc_hdl = NULL;
821 		return (DDI_FAILURE);
822 	}
823 
824 	/*
825 	 * Bind the two together
826 	 */
827 	dma_p->mem_va = vaddr;
828 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
829 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
830 	    &dma_p->cookie, &dma_p->ncookies);
831 	if (err != DDI_DMA_MAPPED) {
832 		ddi_dma_mem_free(&dma_p->acc_hdl);
833 		ddi_dma_free_handle(&dma_p->dma_hdl);
834 		dma_p->acc_hdl = NULL;
835 		dma_p->dma_hdl = NULL;
836 		return (DDI_FAILURE);
837 	}
838 
839 	dma_p->nslots = ~0U;
840 	dma_p->size = ~0U;
841 	dma_p->token = ~0U;
842 	dma_p->offset = 0;
843 	return (DDI_SUCCESS);
844 }
845 
846 /*
847  * Free one allocated area of DMAable memory
848  */
849 static void
850 iwk_free_dma_mem(iwk_dma_t *dma_p)
851 {
852 	if (dma_p->dma_hdl != NULL) {
853 		if (dma_p->ncookies) {
854 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
855 			dma_p->ncookies = 0;
856 		}
857 		ddi_dma_free_handle(&dma_p->dma_hdl);
858 		dma_p->dma_hdl = NULL;
859 	}
860 
861 	if (dma_p->acc_hdl != NULL) {
862 		ddi_dma_mem_free(&dma_p->acc_hdl);
863 		dma_p->acc_hdl = NULL;
864 	}
865 }
866 
867 /*
868  *
869  */
870 static int
871 iwk_alloc_fw_dma(iwk_sc_t *sc)
872 {
873 	int err = DDI_SUCCESS;
874 	iwk_dma_t *dma_p;
875 	char *t;
876 
877 	/*
878 	 * firmware image layout:
879 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
880 	 */
881 	t = (char *)(sc->sc_hdr + 1);
882 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
883 	    &fw_dma_attr, &iwk_dma_accattr,
884 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
885 	    &sc->sc_dma_fw_text);
886 	dma_p = &sc->sc_dma_fw_text;
887 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
888 	    dma_p->ncookies, dma_p->cookie.dmac_address,
889 	    dma_p->cookie.dmac_size));
890 	if (err != DDI_SUCCESS) {
891 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
892 		    " text dma memory");
893 		goto fail;
894 	}
895 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
896 
897 	t += LE_32(sc->sc_hdr->textsz);
898 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
899 	    &fw_dma_attr, &iwk_dma_accattr,
900 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
901 	    &sc->sc_dma_fw_data);
902 	dma_p = &sc->sc_dma_fw_data;
903 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
904 	    dma_p->ncookies, dma_p->cookie.dmac_address,
905 	    dma_p->cookie.dmac_size));
906 	if (err != DDI_SUCCESS) {
907 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
908 		    " data dma memory");
909 		goto fail;
910 	}
911 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
912 
913 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
914 	    &fw_dma_attr, &iwk_dma_accattr,
915 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
916 	    &sc->sc_dma_fw_data_bak);
917 	dma_p = &sc->sc_dma_fw_data_bak;
918 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
919 	    "size:%lx]\n",
920 	    dma_p->ncookies, dma_p->cookie.dmac_address,
921 	    dma_p->cookie.dmac_size));
922 	if (err != DDI_SUCCESS) {
923 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
924 		    " data bakeup dma memory");
925 		goto fail;
926 	}
927 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
928 
929 	t += LE_32(sc->sc_hdr->datasz);
930 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
931 	    &fw_dma_attr, &iwk_dma_accattr,
932 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
933 	    &sc->sc_dma_fw_init_text);
934 	dma_p = &sc->sc_dma_fw_init_text;
935 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
936 	    "size:%lx]\n",
937 	    dma_p->ncookies, dma_p->cookie.dmac_address,
938 	    dma_p->cookie.dmac_size));
939 	if (err != DDI_SUCCESS) {
940 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
941 		    "init text dma memory");
942 		goto fail;
943 	}
944 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
945 
946 	t += LE_32(sc->sc_hdr->init_textsz);
947 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
948 	    &fw_dma_attr, &iwk_dma_accattr,
949 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
950 	    &sc->sc_dma_fw_init_data);
951 	dma_p = &sc->sc_dma_fw_init_data;
952 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
953 	    "size:%lx]\n",
954 	    dma_p->ncookies, dma_p->cookie.dmac_address,
955 	    dma_p->cookie.dmac_size));
956 	if (err != DDI_SUCCESS) {
957 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
958 		    "init data dma memory");
959 		goto fail;
960 	}
961 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
962 
963 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
964 fail:
965 	return (err);
966 }
967 
968 static void
969 iwk_free_fw_dma(iwk_sc_t *sc)
970 {
971 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
972 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
973 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
974 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
975 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
976 }
977 
978 /*
979  * Allocate a shared page between host and NIC.
980  */
981 static int
982 iwk_alloc_shared(iwk_sc_t *sc)
983 {
984 	iwk_dma_t *dma_p;
985 	int err = DDI_SUCCESS;
986 
987 	/* must be aligned on a 4K-page boundary */
988 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
989 	    &sh_dma_attr, &iwk_dma_accattr,
990 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
991 	    &sc->sc_dma_sh);
992 	if (err != DDI_SUCCESS)
993 		goto fail;
994 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
995 
996 	dma_p = &sc->sc_dma_sh;
997 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
998 	    dma_p->ncookies, dma_p->cookie.dmac_address,
999 	    dma_p->cookie.dmac_size));
1000 
1001 	return (err);
1002 fail:
1003 	iwk_free_shared(sc);
1004 	return (err);
1005 }
1006 
/* Release the host/NIC shared page allocated by iwk_alloc_shared(). */
static void
iwk_free_shared(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_sh);
}
1012 
1013 /*
1014  * Allocate a keep warm page.
1015  */
1016 static int
1017 iwk_alloc_kw(iwk_sc_t *sc)
1018 {
1019 	iwk_dma_t *dma_p;
1020 	int err = DDI_SUCCESS;
1021 
1022 	/* must be aligned on a 4K-page boundary */
1023 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1024 	    &kw_dma_attr, &iwk_dma_accattr,
1025 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1026 	    &sc->sc_dma_kw);
1027 	if (err != DDI_SUCCESS)
1028 		goto fail;
1029 
1030 	dma_p = &sc->sc_dma_kw;
1031 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1032 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1033 	    dma_p->cookie.dmac_size));
1034 
1035 	return (err);
1036 fail:
1037 	iwk_free_kw(sc);
1038 	return (err);
1039 }
1040 
/* Release the keep-warm page allocated by iwk_alloc_kw(). */
static void
iwk_free_kw(iwk_sc_t *sc)
{
	iwk_free_dma_mem(&sc->sc_dma_kw);
}
1046 
/*
 * Allocate the receive ring: an array of RX_QUEUE_SIZE 32-bit
 * descriptors plus one DMA buffer per slot.  Each descriptor holds
 * the DMA address of its buffer in the hardware's shifted format.
 * On any failure all partially-allocated resources are released via
 * iwk_free_rx_ring().
 */
static int
iwk_alloc_rx_ring(iwk_sc_t *sc)
{
	iwk_rx_ring_t *ring;
	iwk_rx_data_t *data;
	iwk_dma_t *dma_p;
	int i, err = DDI_SUCCESS;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/* descriptor array: one 32-bit entry per receive buffer */
	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwk_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}
	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
	dma_p = &ring->dma_desc;
	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwk_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945, hence the >> 8;
		 * the descriptor is stored little-endian for the NIC.
		 */
		ring->desc[i] = LE_32((uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8));
	}
	dma_p = &ring->data[0].dma_data;
	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* push the descriptor array out to the device */
	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwk_free_rx_ring(sc);
	return (err);
}
1108 
/*
 * Stop the Rx DMA channel, wait (up to ~2s) for the hardware to report
 * idle, then rewind the driver's ring index to slot 0.
 */
static void
iwk_reset_rx_ring(iwk_sc_t *sc)
{
	int n;

	iwk_mac_access_enter(sc);
	/* writing 0 to the channel-0 config register stops Rx DMA */
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* poll the Rx status register for the idle bit (bit 24) */
	for (n = 0; n < 2000; n++) {
		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
			break;
		DELAY(1000);
	}
#ifdef DEBUG
	if (n == 2000)
		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
#endif
	iwk_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
1129 
1130 static void
1131 iwk_free_rx_ring(iwk_sc_t *sc)
1132 {
1133 	int i;
1134 
1135 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1136 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1137 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1138 			    DDI_DMA_SYNC_FORCPU);
1139 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1140 	}
1141 
1142 	if (sc->sc_rxq.dma_desc.dma_hdl)
1143 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1144 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1145 }
1146 
1147 static int
1148 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1149     int slots, int qid)
1150 {
1151 	iwk_tx_data_t *data;
1152 	iwk_tx_desc_t *desc_h;
1153 	uint32_t paddr_desc_h;
1154 	iwk_cmd_t *cmd_h;
1155 	uint32_t paddr_cmd_h;
1156 	iwk_dma_t *dma_p;
1157 	int i, err = DDI_SUCCESS;
1158 
1159 	ring->qid = qid;
1160 	ring->count = TFD_QUEUE_SIZE_MAX;
1161 	ring->window = slots;
1162 	ring->queued = 0;
1163 	ring->cur = 0;
1164 
1165 	err = iwk_alloc_dma_mem(sc,
1166 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1167 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1168 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1169 	    &ring->dma_desc);
1170 	if (err != DDI_SUCCESS) {
1171 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring desc[%d]"
1172 		    " failed\n", qid));
1173 		goto fail;
1174 	}
1175 	dma_p = &ring->dma_desc;
1176 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1177 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1178 	    dma_p->cookie.dmac_size));
1179 
1180 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1181 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1182 
1183 	err = iwk_alloc_dma_mem(sc,
1184 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1185 	    &cmd_dma_attr, &iwk_dma_accattr,
1186 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1187 	    &ring->dma_cmd);
1188 	if (err != DDI_SUCCESS) {
1189 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring cmd[%d]"
1190 		    " failed\n", qid));
1191 		goto fail;
1192 	}
1193 	dma_p = &ring->dma_cmd;
1194 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1195 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1196 	    dma_p->cookie.dmac_size));
1197 
1198 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1199 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1200 
1201 	/*
1202 	 * Allocate Tx buffers.
1203 	 */
1204 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1205 	    KM_NOSLEEP);
1206 	if (ring->data == NULL) {
1207 		IWK_DBG((IWK_DEBUG_DMA, "could not allocate "
1208 		    "tx data slots\n"));
1209 		goto fail;
1210 	}
1211 
1212 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1213 		data = &ring->data[i];
1214 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1215 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1216 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1217 		    &data->dma_data);
1218 		if (err != DDI_SUCCESS) {
1219 			IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx "
1220 			    "ring buf[%d] failed\n", i));
1221 			goto fail;
1222 		}
1223 
1224 		data->desc = desc_h + i;
1225 		data->paddr_desc = paddr_desc_h +
1226 		    ((caddr_t)data->desc - (caddr_t)desc_h);
1227 		data->cmd = cmd_h +  i; /* (i % slots); */
1228 		data->paddr_cmd = paddr_cmd_h +
1229 		    ((caddr_t)data->cmd - (caddr_t)cmd_h);
1230 		    /* ((i % slots) * sizeof (iwk_cmd_t)); */
1231 	}
1232 	dma_p = &ring->data[0].dma_data;
1233 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1234 	    "size:%lx]\n",
1235 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1236 	    dma_p->cookie.dmac_size));
1237 
1238 	return (err);
1239 
1240 fail:
1241 	if (ring->data)
1242 		kmem_free(ring->data,
1243 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1244 	iwk_free_tx_ring(sc, ring);
1245 	return (err);
1246 }
1247 
/*
 * Stop the Tx DMA channel for this ring, wait (up to ~2ms) for the
 * channel to report idle, sync all data buffers back toward the
 * device, and rewind the ring's software state.
 */
static void
iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	iwk_tx_data_t *data;
	int i, n;

	iwk_mac_access_enter(sc);

	/* writing 0 to the channel config register stops Tx DMA */
	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	for (n = 0; n < 200; n++) {
		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
			break;
		DELAY(10);
	}
#ifdef DEBUG
	if (n == 200 && iwk_dbg_flags > 0) {
		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif
	iwk_mac_access_exit(sc);

	for (i = 0; i < ring->count; i++) {
		data = &ring->data[i];
		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
	}

	ring->queued = 0;
	ring->cur = 0;
}
1279 
/*
 * Release all resources of one Tx ring: the TFD descriptor array, the
 * command array, each per-slot DMA data buffer, and the slot array
 * itself.  Tolerates a partially-constructed ring (NULL handles and a
 * NULL data array are skipped), so it also serves as the error-path
 * cleanup for iwk_alloc_tx_ring().
 */
/*ARGSUSED*/
static void
iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
{
	int i;

	if (ring->dma_desc.dma_hdl != NULL)
		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	iwk_free_dma_mem(&ring->dma_desc);

	if (ring->dma_cmd.dma_hdl != NULL)
		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
	iwk_free_dma_mem(&ring->dma_cmd);

	if (ring->data != NULL) {
		for (i = 0; i < ring->count; i++) {
			if (ring->data[i].dma_data.dma_hdl)
				IWK_DMA_SYNC(ring->data[i].dma_data,
				    DDI_DMA_SYNC_FORDEV);
			iwk_free_dma_mem(&ring->data[i].dma_data);
		}
		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
	}
}
1304 
1305 static int
1306 iwk_ring_init(iwk_sc_t *sc)
1307 {
1308 	int i, err = DDI_SUCCESS;
1309 
1310 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1311 		if (i == IWK_CMD_QUEUE_NUM)
1312 			continue;
1313 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1314 		    i);
1315 		if (err != DDI_SUCCESS)
1316 			goto fail;
1317 	}
1318 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1319 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1320 	if (err != DDI_SUCCESS)
1321 		goto fail;
1322 	err = iwk_alloc_rx_ring(sc);
1323 	if (err != DDI_SUCCESS)
1324 		goto fail;
1325 	return (err);
1326 
1327 fail:
1328 	return (err);
1329 }
1330 
1331 static void
1332 iwk_ring_free(iwk_sc_t *sc)
1333 {
1334 	int i = IWK_NUM_QUEUES;
1335 
1336 	iwk_free_rx_ring(sc);
1337 	while (--i >= 0) {
1338 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1339 	}
1340 }
1341 
1342 /* ARGSUSED */
1343 static ieee80211_node_t *
1344 iwk_node_alloc(ieee80211com_t *ic)
1345 {
1346 	iwk_amrr_t *amrr;
1347 
1348 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1349 	if (amrr != NULL)
1350 		iwk_amrr_init(amrr);
1351 	return (&amrr->in);
1352 }
1353 
/*
 * net80211 node-free callback: run the generic node cleanup, drop any
 * WPA IE attached to the node, then free the driver-private
 * iwk_amrr_t that iwk_node_alloc() returned.
 */
static void
iwk_node_free(ieee80211_node_t *in)
{
	ieee80211com_t *ic = in->in_ic;

	ic->ic_node_cleanup(in);
	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);
	kmem_free(in, sizeof (iwk_amrr_t));
}
1364 
1365 /*ARGSUSED*/
1366 static int
1367 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1368 {
1369 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1370 	ieee80211_node_t *in = ic->ic_bss;
1371 	iwk_tx_power_table_cmd_t txpower;
1372 	enum ieee80211_state ostate = ic->ic_state;
1373 	int i, err = IWK_SUCCESS;
1374 
1375 	mutex_enter(&sc->sc_glock);
1376 	switch (nstate) {
1377 	case IEEE80211_S_SCAN:
1378 		if (ostate == IEEE80211_S_INIT) {
1379 			ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
1380 			/* let LED blink when scanning */
1381 			iwk_set_led(sc, 2, 10, 2);
1382 
1383 			if ((err = iwk_scan(sc)) != 0) {
1384 				IWK_DBG((IWK_DEBUG_80211,
1385 				    "could not initiate scan\n"));
1386 				ic->ic_flags &= ~(IEEE80211_F_SCAN |
1387 				    IEEE80211_F_ASCAN);
1388 				mutex_exit(&sc->sc_glock);
1389 				return (err);
1390 			}
1391 		}
1392 		ic->ic_state = nstate;
1393 		sc->sc_clk = 0;
1394 		mutex_exit(&sc->sc_glock);
1395 		return (IWK_SUCCESS);
1396 
1397 	case IEEE80211_S_AUTH:
1398 		/* reset state to handle reassociations correctly */
1399 		sc->sc_config.assoc_id = 0;
1400 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1401 
1402 		/*
1403 		 * before sending authentication and association request frame,
1404 		 * we need do something in the hardware, such as setting the
1405 		 * channel same to the target AP...
1406 		 */
1407 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1408 			IWK_DBG((IWK_DEBUG_80211,
1409 			    "could not send authentication request\n"));
1410 			mutex_exit(&sc->sc_glock);
1411 			return (err);
1412 		}
1413 		break;
1414 
1415 	case IEEE80211_S_RUN:
1416 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1417 			/* let LED blink when monitoring */
1418 			iwk_set_led(sc, 2, 10, 10);
1419 			break;
1420 		}
1421 
1422 		if (ic->ic_opmode != IEEE80211_M_STA) {
1423 			(void) iwk_hw_set_before_auth(sc);
1424 			/* need setup beacon here */
1425 		}
1426 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1427 
1428 		/* update adapter's configuration */
1429 		sc->sc_config.assoc_id = sc->sc_assoc_id & 0x3fff;
1430 		/* short preamble/slot time are negotiated when associating */
1431 		sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
1432 		    RXON_FLG_SHORT_SLOT_MSK);
1433 
1434 		if (ic->ic_flags & IEEE80211_F_SHSLOT)
1435 			sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
1436 
1437 		if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
1438 			sc->sc_config.flags |=
1439 			    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
1440 
1441 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ASSOC_MSK);
1442 
1443 		if (ic->ic_opmode != IEEE80211_M_STA)
1444 			sc->sc_config.filter_flags |=
1445 			    LE_32(RXON_FILTER_BCON_AWARE_MSK);
1446 
1447 		IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
1448 		    " filter_flags %x\n",
1449 		    sc->sc_config.chan, sc->sc_config.flags,
1450 		    sc->sc_config.filter_flags));
1451 		err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1452 		    sizeof (iwk_rxon_cmd_t), 1);
1453 		if (err != IWK_SUCCESS) {
1454 			IWK_DBG((IWK_DEBUG_80211,
1455 			    "could not update configuration\n"));
1456 			mutex_exit(&sc->sc_glock);
1457 			return (err);
1458 		}
1459 
1460 		/*
1461 		 * set Tx power for 2.4GHz channels
1462 		 * (need further investigation. fix tx power at present)
1463 		 * This cmd should be issued each time the reply_rxon cmd is
1464 		 * invoked.
1465 		 */
1466 		(void) memset(&txpower, 0, sizeof (txpower));
1467 		txpower.band = 1; /* for 2.4G */
1468 		txpower.channel = sc->sc_config.chan;
1469 		txpower.channel_normal_width = 0;
1470 		for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
1471 			txpower.tx_power.ht_ofdm_power[i].s.ramon_tx_gain =
1472 			    0x3f3f;
1473 			txpower.tx_power.ht_ofdm_power[i].s.dsp_predis_atten =
1474 			    110 | (110 << 8);
1475 		}
1476 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
1477 		    .s.ramon_tx_gain = 0x3f3f;
1478 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
1479 		    .s.dsp_predis_atten = 110 | (110 << 8);
1480 		err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
1481 		    sizeof (txpower), 1);
1482 		if (err != IWK_SUCCESS) {
1483 			cmn_err(CE_WARN, "iwk_newstate(): failed to "
1484 			    "set txpower\n");
1485 			return (err);
1486 		}
1487 
1488 		/* start automatic rate control */
1489 		mutex_enter(&sc->sc_mt_lock);
1490 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1491 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1492 			/* set rate to some reasonable initial value */
1493 			i = in->in_rates.ir_nrates - 1;
1494 			while (i > 0 && IEEE80211_RATE(i) > 72)
1495 				i--;
1496 			in->in_txrate = i;
1497 		} else {
1498 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1499 		}
1500 		mutex_exit(&sc->sc_mt_lock);
1501 
1502 		/* set LED on after associated */
1503 		iwk_set_led(sc, 2, 0, 1);
1504 		break;
1505 
1506 	case IEEE80211_S_INIT:
1507 		/* set LED off after init */
1508 		iwk_set_led(sc, 2, 1, 0);
1509 		break;
1510 	case IEEE80211_S_ASSOC:
1511 		break;
1512 	}
1513 
1514 	mutex_exit(&sc->sc_glock);
1515 	return (sc->sc_newstate(ic, nstate, arg));
1516 }
1517 
/*
 * net80211 key-install callback.  Only CCMP (AES-CCM) keys are pushed
 * to the hardware; WEP and TKIP are left to the software crypto path.
 * Returns 1 on success (or software handling), 0 on failure.
 */
/*ARGSUSED*/
static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	iwk_sc_t *sc = (iwk_sc_t *)ic;
	iwk_add_sta_t node;
	int err;

	switch (k->wk_cipher->ic_cipher) {
	case IEEE80211_CIPHER_WEP:
	case IEEE80211_CIPHER_TKIP:
		return (1); /* software handles these ciphers */
	case IEEE80211_CIPHER_AES_CCM:
		break;
	default:
		return (0);
	}
	/*
	 * NOTE(review): filter_flags is modified here before sc_glock is
	 * taken, and without the LE_32() wrapping used on filter_flags
	 * elsewhere in this file (e.g. iwk_newstate()) — confirm both.
	 */
	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK
	    | RXON_FILTER_DIS_GRP_DECRYPT_MSK);

	mutex_enter(&sc->sc_glock);

	/* update ap/multicast node */
	(void) memset(&node, 0, sizeof (node));
	if (IEEE80211_IS_MULTICAST(mac)) {
		/* group key: target the broadcast station entry */
		(void) memset(node.bssid, 0xff, 6);
		node.id = IWK_BROADCAST_ID;
	} else {
		/* pairwise key: target the AP's station entry */
		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
		node.id = IWK_AP_ID;
	}
	if (k->wk_flags & IEEE80211_KEY_XMIT) {
		/* transmit key */
		node.key_flags = 0;
		node.keyp = k->wk_keyix;
	} else {
		/* receive-only key; bit 14 marks it, slot offset by 4 */
		node.key_flags = (1 << 14);
		node.keyp = k->wk_keyix + 4;
	}
	/* assumes wk_keylen fits node.key (CCMP keys are 16 bytes) */
	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
	node.sta_mask = STA_MODIFY_KEY_MASK;
	node.control = 1;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_key_set():"
		    "failed to update ap node\n");
		mutex_exit(&sc->sc_glock);
		return (0);
	}
	mutex_exit(&sc->sc_glock);
	return (1);
}
1570 
1571 /*
1572  * exclusive access to mac begin.
1573  */
1574 static void
1575 iwk_mac_access_enter(iwk_sc_t *sc)
1576 {
1577 	uint32_t tmp;
1578 	int n;
1579 
1580 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1581 	IWK_WRITE(sc, CSR_GP_CNTRL,
1582 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1583 
1584 	/* wait until we succeed */
1585 	for (n = 0; n < 1000; n++) {
1586 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1587 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1588 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1589 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1590 			break;
1591 		DELAY(10);
1592 	}
1593 	if (n == 1000)
1594 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1595 }
1596 
1597 /*
1598  * exclusive access to mac end.
1599  */
1600 static void
1601 iwk_mac_access_exit(iwk_sc_t *sc)
1602 {
1603 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1604 	IWK_WRITE(sc, CSR_GP_CNTRL,
1605 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1606 }
1607 
1608 /*
1609  * this function defined here for future use.
1610  * static uint32_t
1611  * iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1612  * {
1613  * 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1614  * 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1615  * }
1616  */
1617 
/*
 * Write one 32-bit word into the NIC's internal SRAM via the indirect
 * HBUS target-memory address/data register pair.  Callers must hold
 * mac access (iwk_mac_access_enter()).
 */
static void
iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
1624 
/*
 * Read a NIC peripheral (PRPH) register through the indirect HBUS
 * address/data pair.  The (3 << 24) tag in the address word appears
 * to select a full 4-byte access — confirm against iwk_hw.h.
 */
static uint32_t
iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
}
1631 
/*
 * Write a NIC peripheral (PRPH) register through the indirect HBUS
 * address/data pair; same (3 << 24) access tag as iwk_reg_read().
 */
static void
iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
1638 
1639 static void
1640 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1641     uint32_t *data, int wlen)
1642 {
1643 	for (; wlen > 0; wlen--, data++, addr += 4)
1644 		iwk_reg_write(sc, addr, *data);
1645 }
1646 
1647 
1648 /*
1649  * ucode load/initialization steps:
1650  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1651  * BSM contains a small memory that *always* stays powered up, so it can
1652  * retain the bootstrap program even when the card is in a power-saving
1653  * power-down state.  The BSM loads the small program into ARC processor's
1654  * instruction memory when triggered by power-up.
1655  * 2)  load Initialize image via bootstrap program.
1656  * The Initialize image sets up regulatory and calibration data for the
1657  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1658  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1659  * correction.
1660  */
1661 static int
1662 iwk_load_firmware(iwk_sc_t *sc)
1663 {
1664 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1665 	uint32_t size = sc->sc_hdr->bootsz;
1666 	int n, err = IWK_SUCCESS;
1667 
1668 	/*
1669 	 * The physical address bit [4-35] of the initialize uCode.
1670 	 * In the initialize alive notify interrupt the physical address of
1671 	 * the runtime ucode will be set for loading.
1672 	 */
1673 	iwk_mac_access_enter(sc);
1674 
1675 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1676 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1677 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1678 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1679 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1680 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1681 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1682 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1683 
1684 	/* load bootstrap code into BSM memory */
1685 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1686 	    size / sizeof (uint32_t));
1687 
1688 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1689 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1690 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1691 
1692 	/*
1693 	 * prepare to load initialize uCode
1694 	 */
1695 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
1696 
1697 	/* wait while the adapter is busy loading the firmware */
1698 	for (n = 0; n < 1000; n++) {
1699 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
1700 		    BSM_WR_CTRL_REG_BIT_START))
1701 			break;
1702 		DELAY(10);
1703 	}
1704 	if (n == 1000) {
1705 		IWK_DBG((IWK_DEBUG_FW,
1706 		    "timeout transferring firmware\n"));
1707 		err = ETIMEDOUT;
1708 		return (err);
1709 	}
1710 
1711 	/* for future power-save mode use */
1712 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
1713 
1714 	iwk_mac_access_exit(sc);
1715 
1716 	return (err);
1717 }
1718 
/*
 * Handle one REPLY_4965_RX notification: extract the PHY statistics,
 * derive a 1-100 signal-quality value from the per-antenna RSSI,
 * validate length and CRC status, then pass the 802.11 frame up to
 * net80211 via ieee80211_input().
 */
/*ARGSUSED*/
static void
iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_rx_ring_t *ring = &sc->sc_rxq;
	iwk_rx_phy_res_t *stat;
	ieee80211_node_t *in;
	uint32_t *tail;
	struct ieee80211_frame *wh;
	mblk_t *mp;
	uint16_t len, rssi, mrssi, agc;
	int16_t t;
	uint32_t ants, i;
	struct iwk_rx_non_cfg_phy *phyinfo;

	/* assuming not 11n here. cope with 11n in phase-II */
	stat = (iwk_rx_phy_res_t *)(desc + 1);
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/* take the strongest RSSI among the antennas that received */
	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
	mrssi = 0;
	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
	for (i = 0; i < 3; i++) {
		if (ants & (1 << i))
			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
	}
	t = mrssi - agc - 44; /* t is the dBM value */
	/*
	 * convert dBm to percentage ???
	 * (empirical quadratic mapping clamped to [1, 100])
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	if (rssi > 100)
		rssi = 100;
	if (rssi < 1)
		rssi = 1;
	/* status word follows the config PHY data and the frame body */
	len = stat->byte_count;
	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);

	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(*tail)));

	/* reject runts and frames larger than an Rx DMA buffer */
	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(*tail) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
		    LE_32(*tail)));
		sc->sc_rx_err++;
		return;
	}

	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
	/* snoop the association id out of assoc-response frames */
	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}
#ifdef DEBUG
	if (iwk_dbg_flags & IWK_DEBUG_RX)
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
#endif
	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/* send the frame to the 802.11 layer */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		sc->sc_rx_nobuf++;
		IWK_DBG((IWK_DEBUG_RX,
		    "iwk_rx_intr(): alloc rx buf failed\n"));
	}
	/* release node reference */
	ieee80211_free_node(in);
}
1814 
/*
 * Handle a REPLY_TX completion: update rate-control statistics,
 * clear the Tx watchdog, release one slot in the originating ring,
 * and resume the MAC layer's transmit flow if it was throttled.
 */
/*ARGSUSED*/
static void
iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;

	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
	    " retries=%d frame_count=%x nkill=%d "
	    "rate=%x duration=%d status=%x\n",
	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
	    stat->bt_kill_count, stat->rate.r.s.rate,
	    LE_32(stat->duration), LE_32(stat->status)));

	/* feed the AMRR rate-control accounting */
	amrr->txcnt++;
	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	/* a completion arrived, so the watchdog can stand down */
	sc->sc_tx_timer = 0;

	mutex_enter(&sc->sc_tx_lock);
	ring->queued--;
	if (ring->queued < 0)
		ring->queued = 0;
	/*
	 * NOTE(review): ring->count << 3 is ring->count * 8, which
	 * queued can never exceed, so this condition is always true
	 * whenever a reschedule is pending; a drain threshold such as
	 * (ring->count >> 3) was probably intended — confirm before
	 * changing, since the current form only costs extra
	 * mac_tx_update() calls.
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}
	mutex_exit(&sc->sc_tx_lock);
}
1854 
1855 static void
1856 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
1857 {
1858 	if ((desc->hdr.qid & 7) != 4) {
1859 		return;
1860 	}
1861 	mutex_enter(&sc->sc_glock);
1862 	sc->sc_flags |= IWK_F_CMD_DONE;
1863 	cv_signal(&sc->sc_cmd_cv);
1864 	mutex_exit(&sc->sc_glock);
1865 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
1866 	    "qid=%x idx=%d flags=%x type=0x%x\n",
1867 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
1868 	    desc->hdr.type));
1869 }
1870 
1871 static void
1872 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
1873 {
1874 	uint32_t base, i;
1875 	struct iwk_alive_resp *ar =
1876 	    (struct iwk_alive_resp *)(desc + 1);
1877 
1878 	/* the microcontroller is ready */
1879 	IWK_DBG((IWK_DEBUG_FW,
1880 	    "microcode alive notification minor: %x major: %x type:"
1881 	    " %x subtype: %x\n",
1882 	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
1883 
1884 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
1885 		IWK_DBG((IWK_DEBUG_FW,
1886 		    "microcontroller initialization failed\n"));
1887 	}
1888 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
1889 		IWK_DBG((IWK_DEBUG_FW,
1890 		    "initialization alive received.\n"));
1891 		(void) memcpy(&sc->sc_card_alive_init, ar,
1892 		    sizeof (struct iwk_init_alive_resp));
1893 		/* XXX get temperature */
1894 		iwk_mac_access_enter(sc);
1895 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1896 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
1897 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1898 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
1899 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1900 		    sc->sc_dma_fw_data.cookie.dmac_size);
1901 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1902 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
1903 		iwk_mac_access_exit(sc);
1904 	} else {
1905 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
1906 		(void) memcpy(&sc->sc_card_alive_run, ar,
1907 		    sizeof (struct iwk_alive_resp));
1908 
1909 		/*
1910 		 * Init SCD related registers to make Tx work. XXX
1911 		 */
1912 		iwk_mac_access_enter(sc);
1913 
1914 		/* read sram address of data base */
1915 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
1916 
1917 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
1918 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
1919 		    i < 128; i += 4)
1920 			iwk_mem_write(sc, base + i, 0);
1921 
1922 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
1923 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
1924 		    i < 256; i += 4)
1925 			iwk_mem_write(sc, base + i, 0);
1926 
1927 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
1928 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
1929 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
1930 			iwk_mem_write(sc, base + i, 0);
1931 
1932 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
1933 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
1934 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
1935 
1936 		/* initiate the tx queues */
1937 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
1938 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
1939 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
1940 			iwk_mem_write(sc, sc->sc_scd_base +
1941 			    SCD_CONTEXT_QUEUE_OFFSET(i),
1942 			    (SCD_WIN_SIZE & 0x7f));
1943 			iwk_mem_write(sc, sc->sc_scd_base +
1944 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
1945 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
1946 		}
1947 		/* interrupt enable on each queue0-7 */
1948 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
1949 		    (1 << IWK_NUM_QUEUES) - 1);
1950 		/* enable  each channel 0-7 */
1951 		iwk_reg_write(sc, SCD_TXFACT,
1952 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1953 		/*
1954 		 * queue 0-7 maps to FIFO 0-7 and
1955 		 * all queues work under FIFO mode (none-scheduler-ack)
1956 		 */
1957 		for (i = 0; i < 7; i++) {
1958 			iwk_reg_write(sc,
1959 			    SCD_QUEUE_STATUS_BITS(i),
1960 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1961 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
1962 			    SCD_QUEUE_STTS_REG_MSK);
1963 		}
1964 		iwk_mac_access_exit(sc);
1965 
1966 		sc->sc_flags |= IWK_F_FW_INIT;
1967 		cv_signal(&sc->sc_fw_cv);
1968 	}
1969 
1970 }
1971 
1972 static uint_t
1973 iwk_rx_softintr(caddr_t arg)
1974 {
1975 	iwk_sc_t *sc = (iwk_sc_t *)arg;
1976 	ieee80211com_t *ic = &sc->sc_ic;
1977 	iwk_rx_desc_t *desc;
1978 	iwk_rx_data_t *data;
1979 	uint32_t index;
1980 
1981 	mutex_enter(&sc->sc_glock);
1982 	if (sc->sc_rx_softint_pending != 1) {
1983 		mutex_exit(&sc->sc_glock);
1984 		return (DDI_INTR_UNCLAIMED);
1985 	}
1986 	/* disable interrupts */
1987 	IWK_WRITE(sc, CSR_INT_MASK, 0);
1988 	mutex_exit(&sc->sc_glock);
1989 
1990 	/*
1991 	 * firmware has moved the index of the rx queue, driver get it,
1992 	 * and deal with it.
1993 	 */
1994 	index = LE_32(sc->sc_shared->val0) & 0xfff;
1995 
1996 	while (sc->sc_rxq.cur != index) {
1997 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
1998 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
1999 
2000 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2001 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2002 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2003 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2004 
2005 		/* a command other than a tx need to be replied */
2006 		if (!(desc->hdr.qid & 0x80) &&
2007 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2008 		    (desc->hdr.type != REPLY_TX))
2009 			iwk_cmd_intr(sc, desc);
2010 
2011 		switch (desc->hdr.type) {
2012 		case REPLY_4965_RX:
2013 			iwk_rx_intr(sc, desc, data);
2014 			break;
2015 
2016 		case REPLY_TX:
2017 			iwk_tx_intr(sc, desc, data);
2018 			break;
2019 
2020 		case REPLY_ALIVE:
2021 			iwk_ucode_alive(sc, desc);
2022 			break;
2023 
2024 		case CARD_STATE_NOTIFICATION:
2025 		{
2026 			uint32_t *status = (uint32_t *)(desc + 1);
2027 
2028 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2029 			    LE_32(*status)));
2030 
2031 			if (LE_32(*status) & 1) {
2032 				/*
2033 				 * the radio button has to be pushed(OFF). It
2034 				 * is considered as a hw error, the
2035 				 * iwk_thread() tries to recover it after the
2036 				 * button is pushed again(ON)
2037 				 */
2038 				cmn_err(CE_NOTE,
2039 				    "iwk: Radio transmitter is off\n");
2040 				sc->sc_ostate = sc->sc_ic.ic_state;
2041 				ieee80211_new_state(&sc->sc_ic,
2042 				    IEEE80211_S_INIT, -1);
2043 				sc->sc_flags |=
2044 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2045 			}
2046 			break;
2047 		}
2048 		case SCAN_START_NOTIFICATION:
2049 		{
2050 			iwk_start_scan_t *scan =
2051 			    (iwk_start_scan_t *)(desc + 1);
2052 
2053 			IWK_DBG((IWK_DEBUG_SCAN,
2054 			    "scanning channel %d status %x\n",
2055 			    scan->chan, LE_32(scan->status)));
2056 
2057 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2058 			break;
2059 		}
2060 		case SCAN_COMPLETE_NOTIFICATION:
2061 			IWK_DBG((IWK_DEBUG_SCAN, "scan finished\n"));
2062 			ieee80211_end_scan(ic);
2063 			break;
2064 		}
2065 
2066 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2067 	}
2068 
2069 	/*
2070 	 * driver dealt with what reveived in rx queue and tell the information
2071 	 * to the firmware.
2072 	 */
2073 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2074 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2075 
2076 	mutex_enter(&sc->sc_glock);
2077 	/* re-enable interrupts */
2078 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2079 	sc->sc_rx_softint_pending = 0;
2080 	mutex_exit(&sc->sc_glock);
2081 
2082 	return (DDI_INTR_CLAIMED);
2083 }
2084 
/*
 * Primary (hard) interrupt handler.
 * Reads and acknowledges the pending interrupt causes (CSR_INT and the
 * flow-handler status register), handles fatal firmware errors inline,
 * and defers receive processing to the RX soft interrupt.
 * Returns DDI_INTR_CLAIMED/UNCLAIMED per the DDI interrupt contract.
 */
static uint_t
iwk_intr(caddr_t arg)
{
	iwk_sc_t *sc = (iwk_sc_t *)arg;
	uint32_t r, rfh;

	mutex_enter(&sc->sc_glock);
	r = IWK_READ(sc, CSR_INT);
	if (r == 0 || r == 0xffffffff) {
		/* 0: not our interrupt; all-ones: device likely gone */
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_UNCLAIMED);
	}

	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));

	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
	/* disable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	/* ack interrupts */
	IWK_WRITE(sc, CSR_INT, r);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	if (sc->sc_rx_softint_id == NULL) {
		/* soft interrupt not registered yet; just claim and return */
		mutex_exit(&sc->sc_glock);
		return (DDI_INTR_CLAIMED);
	}

	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		/*
		 * Fatal firmware error: stop the hardware and flag
		 * IWK_F_HW_ERR_RECOVER so iwk_thread() re-initializes it.
		 * NOTE(review): sc_ostate and sc_flags are written here
		 * after sc_glock is dropped -- confirm this cannot race
		 * with iwk_thread().
		 */
		IWK_DBG((IWK_DEBUG_FW, "fatal firmware error\n"));
		mutex_exit(&sc->sc_glock);
		iwk_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;
		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
	}

	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		/* frames received: hand off to the RX soft interrupt */
		sc->sc_rx_softint_pending = 1;
		ddi_trigger_softintr(sc->sc_rx_softint_id);
	}

	if (r & BIT_INT_ALIVE)	{
		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
	}

	/* re-enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
	mutex_exit(&sc->sc_glock);

	return (DDI_INTR_CLAIMED);
}
2143 
/*
 * Map an 802.11 rate (in units of 500kb/s) to the PLCP signal value
 * used by the firmware.  Unknown rates map to 0.
 */
static uint8_t
iwk_rate_to_plcp(int rate)
{
	static const struct {
		int	r_rate;		/* rate in 500kb/s units */
		uint8_t	r_plcp;		/* firmware PLCP code */
	} rate_map[] = {
		/* CCK rates */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },
		/* OFDM rates */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 }
	};
	int i;

	for (i = 0; i < (int)(sizeof (rate_map) / sizeof (rate_map[0])); i++) {
		if (rate_map[i].r_rate == rate)
			return (rate_map[i].r_plcp);
	}
	return (0);
}
2194 
2195 static mblk_t *
2196 iwk_m_tx(void *arg, mblk_t *mp)
2197 {
2198 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2199 	ieee80211com_t	*ic = &sc->sc_ic;
2200 	mblk_t			*next;
2201 
2202 	if (ic->ic_state != IEEE80211_S_RUN) {
2203 		freemsgchain(mp);
2204 		return (NULL);
2205 	}
2206 
2207 	while (mp != NULL) {
2208 		next = mp->b_next;
2209 		mp->b_next = NULL;
2210 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2211 			mp->b_next = next;
2212 			break;
2213 		}
2214 		mp = next;
2215 	}
2216 	return (mp);
2217 }
2218 
2219 /* ARGSUSED */
2220 static int
2221 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2222 {
2223 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2224 	iwk_tx_ring_t *ring;
2225 	iwk_tx_desc_t *desc;
2226 	iwk_tx_data_t *data;
2227 	iwk_cmd_t *cmd;
2228 	iwk_tx_cmd_t *tx;
2229 	ieee80211_node_t *in;
2230 	struct ieee80211_frame *wh;
2231 	struct ieee80211_key *k = NULL;
2232 	mblk_t *m, *m0;
2233 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2234 	uint16_t masks = 0;
2235 
2236 	ring = &sc->sc_txq[0];
2237 	data = &ring->data[ring->cur];
2238 	desc = data->desc;
2239 	cmd = data->cmd;
2240 	bzero(desc, sizeof (*desc));
2241 	bzero(cmd, sizeof (*cmd));
2242 
2243 	mutex_enter(&sc->sc_tx_lock);
2244 	if (ring->queued > ring->count - 64) {
2245 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2246 		sc->sc_need_reschedule = 1;
2247 		mutex_exit(&sc->sc_tx_lock);
2248 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2249 		    IEEE80211_FC0_TYPE_DATA) {
2250 			freemsg(mp);
2251 		}
2252 		sc->sc_tx_nobuf++;
2253 		err = IWK_FAIL;
2254 		goto exit;
2255 	}
2256 	mutex_exit(&sc->sc_tx_lock);
2257 
2258 	hdrlen = sizeof (struct ieee80211_frame);
2259 
2260 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2261 	if (m == NULL) { /* can not alloc buf, drop this package */
2262 		cmn_err(CE_WARN,
2263 		    "iwk_send(): failed to allocate msgbuf\n");
2264 		freemsg(mp);
2265 		err = IWK_SUCCESS;
2266 		goto exit;
2267 	}
2268 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2269 		mblen = MBLKL(m0);
2270 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2271 		off += mblen;
2272 	}
2273 	m->b_wptr += off;
2274 	freemsg(mp);
2275 
2276 	wh = (struct ieee80211_frame *)m->b_rptr;
2277 
2278 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2279 	if (in == NULL) {
2280 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2281 		freemsg(m);
2282 		sc->sc_tx_err++;
2283 		err = IWK_SUCCESS;
2284 		goto exit;
2285 	}
2286 	(void) ieee80211_encap(ic, m, in);
2287 
2288 	cmd->hdr.type = REPLY_TX;
2289 	cmd->hdr.flags = 0;
2290 	cmd->hdr.qid = ring->qid;
2291 	cmd->hdr.idx = ring->cur;
2292 
2293 	tx = (iwk_tx_cmd_t *)cmd->data;
2294 	tx->tx_flags = 0;
2295 
2296 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2297 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2298 	} else {
2299 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2300 	}
2301 
2302 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2303 		k = ieee80211_crypto_encap(ic, m);
2304 		if (k == NULL) {
2305 			freemsg(m);
2306 			sc->sc_tx_err++;
2307 			err = IWK_SUCCESS;
2308 			goto exit;
2309 		}
2310 
2311 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2312 			tx->sec_ctl = 2; /* for CCMP */
2313 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2314 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2315 		}
2316 
2317 		/* packet header may have moved, reset our local pointer */
2318 		wh = (struct ieee80211_frame *)m->b_rptr;
2319 	}
2320 
2321 	len = msgdsize(m);
2322 
2323 #ifdef DEBUG
2324 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2325 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2326 #endif
2327 
2328 	/* pickup a rate */
2329 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2330 	    IEEE80211_FC0_TYPE_MGT) {
2331 		/* mgmt frames are sent at 1M */
2332 		rate = in->in_rates.ir_rates[0];
2333 	} else {
2334 		/*
2335 		 * do it here for the software way rate control.
2336 		 * later for rate scaling in hardware.
2337 		 * maybe like the following, for management frame:
2338 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2339 		 * for data frame:
2340 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2341 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2342 		 * tx->initial_rate_index = 1;
2343 		 *
2344 		 * now the txrate is determined in tx cmd flags, set to the
2345 		 * max value 54M for 11g and 11M for 11b.
2346 		 */
2347 
2348 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2349 			rate = ic->ic_fixed_rate;
2350 		} else {
2351 			rate = in->in_rates.ir_rates[in->in_txrate];
2352 		}
2353 	}
2354 	rate &= IEEE80211_RATE_VAL;
2355 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2356 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2357 
2358 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2359 
2360 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2361 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2362 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2363 
2364 	/* retrieve destination node's id */
2365 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2366 		tx->sta_id = IWK_BROADCAST_ID;
2367 	} else {
2368 		tx->sta_id = IWK_AP_ID;
2369 	}
2370 
2371 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2372 	    IEEE80211_FC0_TYPE_MGT) {
2373 		/* tell h/w to set timestamp in probe responses */
2374 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2375 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2376 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2377 
2378 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2379 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2380 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2381 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2382 			tx->timeout.pm_frame_timeout = 3;
2383 		else
2384 			tx->timeout.pm_frame_timeout = 2;
2385 	} else
2386 		tx->timeout.pm_frame_timeout = 0;
2387 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2388 		masks |= RATE_MCS_CCK_MSK;
2389 
2390 	masks |= RATE_MCS_ANT_B_MSK;
2391 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2392 
2393 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2394 	    tx->tx_flags));
2395 
2396 	tx->rts_retry_limit = 60;
2397 	tx->data_retry_limit = 15;
2398 
2399 	tx->stop_time.life_time  = LE_32(0xffffffff);
2400 
2401 	tx->len = LE_16(len);
2402 
2403 	tx->dram_lsb_ptr =
2404 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2405 	tx->dram_msb_ptr = 0;
2406 	tx->driver_txop = 0;
2407 	tx->next_frame_len = 0;
2408 
2409 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2410 	m->b_rptr += hdrlen;
2411 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2412 
2413 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2414 	    ring->qid, ring->cur, len));
2415 
2416 	/*
2417 	 * first segment includes the tx cmd plus the 802.11 header,
2418 	 * the second includes the remaining of the 802.11 frame.
2419 	 */
2420 	desc->val0 = LE_32(2 << 24);
2421 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2422 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2423 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2424 	desc->pa[0].val2 =
2425 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2426 	    ((len - hdrlen) << 20);
2427 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2428 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2429 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2430 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2431 
2432 	mutex_enter(&sc->sc_tx_lock);
2433 	ring->queued++;
2434 	mutex_exit(&sc->sc_tx_lock);
2435 
2436 	/* kick ring */
2437 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].tfd_offset[ring->cur].val
2438 	    = 8 + len;
2439 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2440 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2441 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2442 	}
2443 
2444 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2445 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2446 
2447 	ring->cur = (ring->cur + 1) % ring->count;
2448 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2449 	freemsg(m);
2450 	/* release node reference */
2451 	ieee80211_free_node(in);
2452 
2453 	ic->ic_stats.is_tx_bytes += len;
2454 	ic->ic_stats.is_tx_frags++;
2455 
2456 	if (sc->sc_tx_timer == 0)
2457 		sc->sc_tx_timer = 10;
2458 exit:
2459 	return (err);
2460 }
2461 
2462 static void
2463 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2464 {
2465 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2466 	ieee80211com_t	*ic = &sc->sc_ic;
2467 	int		err;
2468 
2469 	err = ieee80211_ioctl(ic, wq, mp);
2470 	if (err == ENETRESET) {
2471 		/*
2472 		 * This is special for the hidden AP connection.
2473 		 * In any case, we should make sure only one 'scan'
2474 		 * in the driver for a 'connect' CLI command. So
2475 		 * when connecting to a hidden AP, the scan is just
2476 		 * sent out to the air when we know the desired
2477 		 * essid of the AP we want to connect.
2478 		 */
2479 		if (ic->ic_des_esslen) {
2480 			(void) ieee80211_new_state(ic,
2481 			    IEEE80211_S_SCAN, -1);
2482 		}
2483 	}
2484 }
2485 
2486 /*ARGSUSED*/
2487 static int
2488 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
2489 {
2490 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2491 	ieee80211com_t	*ic = &sc->sc_ic;
2492 	ieee80211_node_t *in = ic->ic_bss;
2493 	struct ieee80211_rateset *rs = &in->in_rates;
2494 
2495 	mutex_enter(&sc->sc_glock);
2496 	switch (stat) {
2497 	case MAC_STAT_IFSPEED:
2498 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
2499 		    (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL)
2500 		    : ic->ic_fixed_rate) * 5000000ull;
2501 		break;
2502 	case MAC_STAT_NOXMTBUF:
2503 		*val = sc->sc_tx_nobuf;
2504 		break;
2505 	case MAC_STAT_NORCVBUF:
2506 		*val = sc->sc_rx_nobuf;
2507 		break;
2508 	case MAC_STAT_IERRORS:
2509 		*val = sc->sc_rx_err;
2510 		break;
2511 	case MAC_STAT_RBYTES:
2512 		*val = ic->ic_stats.is_rx_bytes;
2513 		break;
2514 	case MAC_STAT_IPACKETS:
2515 		*val = ic->ic_stats.is_rx_frags;
2516 		break;
2517 	case MAC_STAT_OBYTES:
2518 		*val = ic->ic_stats.is_tx_bytes;
2519 		break;
2520 	case MAC_STAT_OPACKETS:
2521 		*val = ic->ic_stats.is_tx_frags;
2522 		break;
2523 	case MAC_STAT_OERRORS:
2524 	case WIFI_STAT_TX_FAILED:
2525 		*val = sc->sc_tx_err;
2526 		break;
2527 	case WIFI_STAT_TX_RETRANS:
2528 		*val = sc->sc_tx_retries;
2529 		break;
2530 	case WIFI_STAT_FCS_ERRORS:
2531 	case WIFI_STAT_WEP_ERRORS:
2532 	case WIFI_STAT_TX_FRAGS:
2533 	case WIFI_STAT_MCAST_TX:
2534 	case WIFI_STAT_RTS_SUCCESS:
2535 	case WIFI_STAT_RTS_FAILURE:
2536 	case WIFI_STAT_ACK_FAILURE:
2537 	case WIFI_STAT_RX_FRAGS:
2538 	case WIFI_STAT_MCAST_RX:
2539 	case WIFI_STAT_RX_DUPS:
2540 		mutex_exit(&sc->sc_glock);
2541 		return (ieee80211_stat(ic, stat, val));
2542 	default:
2543 		mutex_exit(&sc->sc_glock);
2544 		return (ENOTSUP);
2545 	}
2546 	mutex_exit(&sc->sc_glock);
2547 
2548 	return (IWK_SUCCESS);
2549 
2550 }
2551 
2552 static int
2553 iwk_m_start(void *arg)
2554 {
2555 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2556 	ieee80211com_t	*ic = &sc->sc_ic;
2557 	int err;
2558 
2559 	err = iwk_init(sc);
2560 
2561 	if (err != IWK_SUCCESS) {
2562 		/*
2563 		 * The hw init err(eg. RF is OFF). Return Success to make
2564 		 * the 'plumb' succeed. The iwk_thread() tries to re-init
2565 		 * background.
2566 		 */
2567 		mutex_enter(&sc->sc_glock);
2568 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2569 		mutex_exit(&sc->sc_glock);
2570 		return (IWK_SUCCESS);
2571 	}
2572 
2573 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2574 
2575 	mutex_enter(&sc->sc_glock);
2576 	sc->sc_flags |= IWK_F_RUNNING;
2577 	mutex_exit(&sc->sc_glock);
2578 
2579 	return (IWK_SUCCESS);
2580 }
2581 
2582 static void
2583 iwk_m_stop(void *arg)
2584 {
2585 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2586 	ieee80211com_t	*ic = &sc->sc_ic;
2587 
2588 	iwk_stop(sc);
2589 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2590 	mutex_enter(&sc->sc_mt_lock);
2591 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2592 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
2593 	mutex_exit(&sc->sc_mt_lock);
2594 	mutex_enter(&sc->sc_glock);
2595 	sc->sc_flags &= ~IWK_F_RUNNING;
2596 	mutex_exit(&sc->sc_glock);
2597 }
2598 
2599 /*ARGSUSED*/
2600 static int
2601 iwk_m_unicst(void *arg, const uint8_t *macaddr)
2602 {
2603 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2604 	ieee80211com_t	*ic = &sc->sc_ic;
2605 	int err;
2606 
2607 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
2608 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
2609 		mutex_enter(&sc->sc_glock);
2610 		err = iwk_config(sc);
2611 		mutex_exit(&sc->sc_glock);
2612 		if (err != IWK_SUCCESS) {
2613 			cmn_err(CE_WARN,
2614 			    "iwk_m_unicst(): "
2615 			    "failed to configure device\n");
2616 			goto fail;
2617 		}
2618 	}
2619 	return (IWK_SUCCESS);
2620 fail:
2621 	return (err);
2622 }
2623 
2624 /*ARGSUSED*/
2625 static int
2626 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
2627 {
2628 	return (IWK_SUCCESS);
2629 }
2630 
2631 /*ARGSUSED*/
2632 static int
2633 iwk_m_promisc(void *arg, boolean_t on)
2634 {
2635 	return (IWK_SUCCESS);
2636 }
2637 
/*
 * Driver worker thread.  Runs until sc_mf_thread_switch is cleared and
 * handles, every ~100ms: RF-kill switch polling, recovery from fatal
 * hardware/firmware errors (IWK_F_HW_ERR_RECOVER), software rate
 * control (AMRR), and the tx watchdog armed by iwk_send().
 * Runs with sc_mt_lock held except across blocking operations.
 */
static void
iwk_thread(iwk_sc_t *sc)
{
	ieee80211com_t	*ic = &sc->sc_ic;
	clock_t clk;
	int times = 0, err, n = 0, timeout = 0;
	uint32_t tmp;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread_switch) {
		/* poll the hardware RF-kill switch state */
		tmp = IWK_READ(sc, CSR_GP_CNTRL);
		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
			sc->sc_flags &= ~IWK_F_RADIO_OFF;
		} else {
			sc->sc_flags |= IWK_F_RADIO_OFF;
		}
		/*
		 * If the RF is OFF, do nothing.
		 */
		if (sc->sc_flags & IWK_F_RADIO_OFF) {
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(100000));
			mutex_enter(&sc->sc_mt_lock);
			continue;
		}

		/*
		 * recovery fatal error
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {

			IWK_DBG((IWK_DEBUG_FW,
			    "iwk_thread(): "
			    "try to recover fatal hw error: %d\n", times++));

			iwk_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

			/* back off a little longer after each failure */
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(2000000 + n*500000));
			mutex_enter(&sc->sc_mt_lock);
			err = iwk_init(sc);
			if (err != IWK_SUCCESS) {
				/* retry up to 20 times before giving up */
				n++;
				if (n < 20)
					continue;
			}
			n = 0;
			if (!err)
				sc->sc_flags |= IWK_F_RUNNING;
			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
			mutex_exit(&sc->sc_mt_lock);
			delay(drv_usectohz(2000000));
			/* resume scanning unless we were torn down */
			if (sc->sc_ostate != IEEE80211_S_INIT)
				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
			mutex_enter(&sc->sc_mt_lock);
		}

		/*
		 * rate ctl: run AMRR roughly every 500ms
		 */
		if (ic->ic_mach &&
		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
			clk = ddi_get_lbolt();
			if (clk > sc->sc_clk + drv_usectohz(500000)) {
				iwk_amrr_timeout(sc);
			}
		}

		mutex_exit(&sc->sc_mt_lock);
		delay(drv_usectohz(100000));
		mutex_enter(&sc->sc_mt_lock);

		/*
		 * tx watchdog: sc_tx_timer is decremented about once a
		 * second (10 loop passes); when it hits zero, treat the
		 * stalled transmitter as a hardware error to recover.
		 */
		if (sc->sc_tx_timer) {
			timeout++;
			if (timeout == 10) {
				sc->sc_tx_timer--;
				if (sc->sc_tx_timer == 0) {
					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
					sc->sc_ostate = IEEE80211_S_RUN;
					IWK_DBG((IWK_DEBUG_FW,
					    "iwk_thread(): try to recover from"
					    " 'send fail\n"));
				}
				timeout = 0;
			}
		}

	}
	/* signal iwk_stop()/detach that the thread has exited */
	sc->sc_mf_thread = NULL;
	cv_signal(&sc->sc_mt_cv);
	mutex_exit(&sc->sc_mt_lock);
}
2732 
2733 
2734 /*
2735  * Send a command to the firmware.
2736  */
2737 static int
2738 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
2739 {
2740 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
2741 	iwk_tx_desc_t *desc;
2742 	iwk_cmd_t *cmd;
2743 
2744 	ASSERT(size <= sizeof (cmd->data));
2745 	ASSERT(mutex_owned(&sc->sc_glock));
2746 
2747 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
2748 	desc = ring->data[ring->cur].desc;
2749 	cmd = ring->data[ring->cur].cmd;
2750 
2751 	cmd->hdr.type = (uint8_t)code;
2752 	cmd->hdr.flags = 0;
2753 	cmd->hdr.qid = ring->qid;
2754 	cmd->hdr.idx = ring->cur;
2755 	(void) memcpy(cmd->data, buf, size);
2756 	(void) memset(desc, 0, sizeof (*desc));
2757 
2758 	desc->val0 = LE_32(1 << 24);
2759 	desc->pa[0].tb1_addr =
2760 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
2761 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
2762 
2763 	/* kick cmd ring XXX */
2764 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
2765 	    .tfd_offset[ring->cur].val = 8;
2766 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2767 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
2768 		    .tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
2769 	}
2770 	ring->cur = (ring->cur + 1) % ring->count;
2771 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2772 
2773 	if (async)
2774 		return (IWK_SUCCESS);
2775 	else {
2776 		clock_t clk;
2777 		sc->sc_flags &= ~IWK_F_CMD_DONE;
2778 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
2779 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
2780 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk)
2781 			    < 0)
2782 				break;
2783 		}
2784 		if (sc->sc_flags & IWK_F_CMD_DONE)
2785 			return (IWK_SUCCESS);
2786 		else
2787 			return (IWK_FAIL);
2788 	}
2789 }
2790 
2791 static void
2792 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
2793 {
2794 	iwk_led_cmd_t led;
2795 
2796 	led.interval = LE_32(100000);	/* unit: 100ms */
2797 	led.id = id;
2798 	led.off = off;
2799 	led.on = on;
2800 
2801 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
2802 }
2803 
2804 static int
2805 iwk_hw_set_before_auth(iwk_sc_t *sc)
2806 {
2807 	ieee80211com_t *ic = &sc->sc_ic;
2808 	ieee80211_node_t *in = ic->ic_bss;
2809 	iwk_tx_power_table_cmd_t txpower;
2810 	iwk_add_sta_t node;
2811 	iwk_link_quality_cmd_t link_quality;
2812 	struct ieee80211_rateset rs;
2813 	uint16_t masks = 0, rate;
2814 	int i, err;
2815 
2816 	/* update adapter's configuration according the info of target AP */
2817 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
2818 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
2819 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
2820 		sc->sc_config.cck_basic_rates  = 0x03;
2821 		sc->sc_config.ofdm_basic_rates = 0;
2822 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
2823 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
2824 		sc->sc_config.cck_basic_rates  = 0;
2825 		sc->sc_config.ofdm_basic_rates = 0x15;
2826 	} else { /* assume 802.11b/g */
2827 		sc->sc_config.cck_basic_rates  = 0x0f;
2828 		sc->sc_config.ofdm_basic_rates = 0xff;
2829 	}
2830 
2831 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
2832 	    RXON_FLG_SHORT_SLOT_MSK);
2833 
2834 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
2835 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
2836 	else
2837 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
2838 
2839 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
2840 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
2841 	else
2842 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
2843 
2844 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
2845 	    "filter_flags %x  cck %x ofdm %x"
2846 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
2847 	    sc->sc_config.chan, sc->sc_config.flags,
2848 	    sc->sc_config.filter_flags,
2849 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
2850 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
2851 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
2852 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
2853 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
2854 	    sizeof (iwk_rxon_cmd_t), 1);
2855 	if (err != IWK_SUCCESS) {
2856 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2857 		    " failed to config chan%d\n",
2858 		    sc->sc_config.chan);
2859 		return (err);
2860 	}
2861 
2862 	/*
2863 	 * set Tx power for 2.4GHz channels
2864 	 * (need further investigation. fix tx power at present)
2865 	 */
2866 	(void) memset(&txpower, 0, sizeof (txpower));
2867 	txpower.band = 1; /* for 2.4G */
2868 	txpower.channel = sc->sc_config.chan;
2869 	txpower.channel_normal_width = 0;
2870 	for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
2871 		txpower.tx_power.ht_ofdm_power[i].s
2872 		    .ramon_tx_gain = 0x3f3f;
2873 		txpower.tx_power.ht_ofdm_power[i].s
2874 		    .dsp_predis_atten = 110 | (110 << 8);
2875 	}
2876 	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2877 	    s.ramon_tx_gain = 0x3f3f;
2878 	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2879 	    s.dsp_predis_atten = 110 | (110 << 8);
2880 	err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
2881 	    sizeof (txpower), 1);
2882 	if (err != IWK_SUCCESS) {
2883 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2884 		    " failed to set txpower\n");
2885 		return (err);
2886 	}
2887 
2888 	/* add default AP node */
2889 	(void) memset(&node, 0, sizeof (node));
2890 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
2891 	node.id = IWK_AP_ID;
2892 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
2893 	if (err != IWK_SUCCESS) {
2894 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
2895 		    " failed to add BSS node\n");
2896 		return (err);
2897 	}
2898 
2899 	/* TX_LINK_QUALITY cmd ? */
2900 	(void) memset(&link_quality, 0, sizeof (link_quality));
2901 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
2902 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2903 		if (i < rs.ir_nrates)
2904 			rate = rs.ir_rates[rs.ir_nrates - i];
2905 		else
2906 			rate = 2;
2907 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2908 			masks |= RATE_MCS_CCK_MSK;
2909 		masks |= RATE_MCS_ANT_B_MSK;
2910 		masks &= ~RATE_MCS_ANT_A_MSK;
2911 		link_quality.rate_n_flags[i] =
2912 		    iwk_rate_to_plcp(rate) | masks;
2913 	}
2914 
2915 	link_quality.general_params.single_stream_ant_msk = 2;
2916 	link_quality.general_params.dual_stream_ant_msk = 3;
2917 	link_quality.agg_params.agg_dis_start_th = 3;
2918 	link_quality.agg_params.agg_time_limit = LE_16(4000);
2919 	link_quality.sta_id = IWK_AP_ID;
2920 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
2921 	    sizeof (link_quality), 1);
2922 	if (err != IWK_SUCCESS) {
2923 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
2924 		    "failed to config link quality table\n");
2925 		return (err);
2926 	}
2927 
2928 	return (IWK_SUCCESS);
2929 }
2930 
2931 /*
2932  * Send a scan request(assembly scan cmd) to the firmware.
2933  */
2934 static int
2935 iwk_scan(iwk_sc_t *sc)
2936 {
2937 	ieee80211com_t *ic = &sc->sc_ic;
2938 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
2939 	iwk_tx_desc_t *desc;
2940 	iwk_tx_data_t *data;
2941 	iwk_cmd_t *cmd;
2942 	iwk_scan_hdr_t *hdr;
2943 	iwk_scan_chan_t *chan;
2944 	struct ieee80211_frame *wh;
2945 	ieee80211_node_t *in = ic->ic_bss;
2946 	struct ieee80211_rateset *rs;
2947 	enum ieee80211_phymode mode;
2948 	uint8_t *frm;
2949 	int i, pktlen, nrates;
2950 
2951 	data = &ring->data[ring->cur];
2952 	desc = data->desc;
2953 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
2954 
2955 	cmd->hdr.type = REPLY_SCAN_CMD;
2956 	cmd->hdr.flags = 0;
2957 	cmd->hdr.qid = ring->qid;
2958 	cmd->hdr.idx = ring->cur | 0x40;
2959 
2960 	hdr = (iwk_scan_hdr_t *)cmd->data;
2961 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
2962 	hdr->nchan = 11;
2963 	hdr->quiet_time = LE_16(5);
2964 	hdr->quiet_plcp_th = LE_16(1);
2965 
2966 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2967 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
2968 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
2969 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
2970 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
2971 
2972 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2973 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
2974 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
2975 	hdr->tx_cmd.tx_flags |= (0x200);
2976 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
2977 	hdr->tx_cmd.rate.r.rate_n_flags |=
2978 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
2979 	hdr->direct_scan[0].len = ic->ic_des_esslen;
2980 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
2981 
2982 	if (ic->ic_des_esslen)
2983 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
2984 		    ic->ic_des_esslen);
2985 	else
2986 		bzero(hdr->direct_scan[0].ssid,
2987 		    sizeof (hdr->direct_scan[0].ssid));
2988 	/*
2989 	 * a probe request frame is required after the REPLY_SCAN_CMD
2990 	 */
2991 	wh = (struct ieee80211_frame *)(hdr + 1);
2992 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
2993 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
2994 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
2995 	(void) memset(wh->i_addr1, 0xff, 6);
2996 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
2997 	(void) memset(wh->i_addr3, 0xff, 6);
2998 	*(uint16_t *)&wh->i_dur[0] = 0;
2999 	*(uint16_t *)&wh->i_seq[0] = 0;
3000 
3001 	frm = (uint8_t *)(wh + 1);
3002 
3003 	/* essid IE */
3004 	*frm++ = IEEE80211_ELEMID_SSID;
3005 	*frm++ = in->in_esslen;
3006 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3007 	frm += in->in_esslen;
3008 
3009 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3010 	rs = &ic->ic_sup_rates[mode];
3011 
3012 	/* supported rates IE */
3013 	*frm++ = IEEE80211_ELEMID_RATES;
3014 	nrates = rs->ir_nrates;
3015 	if (nrates > IEEE80211_RATE_SIZE)
3016 		nrates = IEEE80211_RATE_SIZE;
3017 	*frm++ = (uint8_t)nrates;
3018 	(void) memcpy(frm, rs->ir_rates, nrates);
3019 	frm += nrates;
3020 
3021 	/* supported xrates IE */
3022 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3023 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3024 		*frm++ = IEEE80211_ELEMID_XRATES;
3025 		*frm++ = (uint8_t)nrates;
3026 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3027 		frm += nrates;
3028 	}
3029 
3030 	/* optionnal IE (usually for wpa) */
3031 	if (ic->ic_opt_ie != NULL) {
3032 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3033 		frm += ic->ic_opt_ie_len;
3034 	}
3035 
3036 	/* setup length of probe request */
3037 	hdr->tx_cmd.len = LE_16(frm - (uint8_t *)wh);
3038 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3039 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3040 
3041 	/*
3042 	 * the attribute of the scan channels are required after the probe
3043 	 * request frame.
3044 	 */
3045 	chan = (iwk_scan_chan_t *)frm;
3046 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3047 		chan->type = 3;
3048 		chan->chan = (uint8_t)i;
3049 		chan->tpc.tx_gain = 0x3f;
3050 		chan->tpc.dsp_atten = 110;
3051 		chan->active_dwell = LE_16(20);
3052 		chan->passive_dwell = LE_16(120);
3053 
3054 		frm += sizeof (iwk_scan_chan_t);
3055 	}
3056 
3057 	pktlen = frm - (uint8_t *)cmd;
3058 
3059 	(void) memset(desc, 0, sizeof (*desc));
3060 	desc->val0 = LE_32(1 << 24);
3061 	desc->pa[0].tb1_addr =
3062 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3063 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3064 
3065 	/*
3066 	 * maybe for cmd, filling the byte cnt table is not necessary.
3067 	 * anyway, we fill it here.
3068 	 */
3069 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
3070 	    .tfd_offset[ring->cur].val = 8;
3071 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3072 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
3073 		    .tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3074 	}
3075 
3076 	/* kick cmd ring */
3077 	ring->cur = (ring->cur + 1) % ring->count;
3078 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3079 
3080 	return (IWK_SUCCESS);
3081 }
3082 
/*
 * Configure the NIC after firmware load.  Issues, in order: power
 * mode, bluetooth coexistence, RXON (operating state), 2.4GHz Tx
 * power, the broadcast station entry and the link quality table.
 * Each step sends one host command; the first failure aborts the
 * sequence and its error is returned.  Called from iwk_init() with
 * sc_glock held.
 */
static int
iwk_config(iwk_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwk_tx_power_table_cmd_t txpower;
	iwk_powertable_cmd_t powertable;
	iwk_bt_cmd_t bt;
	iwk_add_sta_t node;
	iwk_link_quality_cmd_t link_quality;
	int i, err;
	uint16_t masks = 0;

	/*
	 * set power mode. Disable power management at present, do it later
	 * (flags 0x8 — meaning per firmware command spec; TODO confirm)
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
		return (err);
	}

	/* configure bt coexistence (values match firmware defaults; confirm) */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN,
		    "iwk_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/*
	 * configure rxon: the device's operating state (addresses,
	 * channel, band flags, device type and receive filters)
	 */
	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK | RXON_FLG_AUTO_DETECT_MSK
	    | RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
	/* device type and filter flags depend on the 802.11 operating mode */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}
	/* enable all CCK (bits 0-3) and all OFDM (bits 0-7) basic rates */
	sc->sc_config.cck_basic_rates  = 0x0f;
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;

	/* set antenna */

	/*
	 * NOTE(review): DRIVER_FORCE_MSK is OR-ed outside the LE_16()
	 * conversion while the chain selection bits are inside —
	 * presumably intentional on little-endian; confirm on big-endian.
	 */
	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwk_rxon_cmd_t), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * set Tx power for 2.4GHz channels
	 * (need further investigation. fix tx power at present)
	 */
	(void) memset(&txpower, 0, sizeof (txpower));
	txpower.band = 1; /* for 2.4G */
	txpower.channel = sc->sc_config.chan;
	txpower.channel_normal_width = 0;
	/* fixed gain/attenuation for every HT/OFDM entry */
	for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
		txpower.tx_power.ht_ofdm_power[i]
		    .s.ramon_tx_gain = 0x3f3f;
		txpower.tx_power.ht_ofdm_power[i]
		    .s.dsp_predis_atten = 110 | (110 << 8);
	}
	/*
	 * the entry at index POWER_TABLE_NUM_HT_OFDM_ENTRIES is the
	 * legacy (CCK) entry following the HT/OFDM ones — assumes the
	 * array is sized NUM_HT_OFDM_ENTRIES + 1; TODO confirm in iwk_hw.h
	 */
	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
	    .s.ramon_tx_gain = 0x3f3f;
	txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES]
	    .s.dsp_predis_atten = 110 | (110 << 8);
	err = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
	    sizeof (txpower), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): failed to set txpower\n");
		return (err);
	}

	/* add broadcast node so that we can send broadcast frame */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.bssid, 0xff, 6);
	node.id = IWK_BROADCAST_ID;
	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	/*
	 * TX_LINK_QUALITY cmd ?
	 * every retry slot uses the lowest CCK rate on antenna B
	 */
	(void) memset(&link_quality, 0, sizeof (link_quality));
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		masks |= RATE_MCS_CCK_MSK;
		masks |= RATE_MCS_ANT_B_MSK;
		masks &= ~RATE_MCS_ANT_A_MSK;
		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
	}

	link_quality.general_params.single_stream_ant_msk = 2;
	link_quality.general_params.dual_stream_ant_msk = 3;
	link_quality.agg_params.agg_dis_start_th = 3;
	link_quality.agg_params.agg_time_limit = LE_16(4000);
	link_quality.sta_id = IWK_BROADCAST_ID;
	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
	    sizeof (link_quality), 0);
	if (err != IWK_SUCCESS) {
		cmn_err(CE_WARN, "iwk_config(): "
		    "failed to config link quality table\n");
		return (err);
	}

	return (IWK_SUCCESS);
}
3230 
3231 static void
3232 iwk_stop_master(iwk_sc_t *sc)
3233 {
3234 	uint32_t tmp;
3235 	int n;
3236 
3237 	tmp = IWK_READ(sc, CSR_RESET);
3238 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3239 
3240 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3241 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3242 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3243 		return;
3244 
3245 	for (n = 0; n < 2000; n++) {
3246 		if (IWK_READ(sc, CSR_RESET) &
3247 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3248 			break;
3249 		DELAY(1000);
3250 	}
3251 	if (n == 2000)
3252 		IWK_DBG((IWK_DEBUG_HW,
3253 		    "timeout waiting for master stop\n"));
3254 }
3255 
3256 static int
3257 iwk_power_up(iwk_sc_t *sc)
3258 {
3259 	uint32_t tmp;
3260 
3261 	iwk_mac_access_enter(sc);
3262 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3263 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3264 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3265 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3266 	iwk_mac_access_exit(sc);
3267 
3268 	DELAY(5000);
3269 	return (IWK_SUCCESS);
3270 }
3271 
/*
 * Low-level hardware bring-up performed before firmware load:
 * clear pending interrupts, enable the MAC clock, request DMA/BSM
 * clocks, power up from VMAIN, and apply PCI config-space tweaks.
 * Returns ETIMEDOUT if the MAC clock never becomes ready,
 * IWK_SUCCESS otherwise.
 */
static int
iwk_preinit(iwk_sc_t *sc)
{
	uint32_t tmp;
	int n;
	uint8_t vlink;

	/* clear any pending interrupts */
	IWK_WRITE(sc, CSR_INT, 0xffffffff);

	/* disable the L0S exit timer (hardware workaround) */
	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock ready (up to 1000 * 10us) */
	for (n = 0; n < 1000; n++) {
		if (IWK_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
			break;
		DELAY(10);
	}
	if (n == 1000) {
		return (ETIMEDOUT);
	}
	/* request DMA and BSM clocks; disable PCIe L1-active */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);

	DELAY(20);
	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwk_mac_access_exit(sc);

	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */

	(void) iwk_power_up(sc);

	/*
	 * on early hardware revisions clear bit 11 of the PCI config
	 * register at offset 0xe8 — presumably a power-management
	 * workaround; TODO confirm against Intel 4965 errata
	 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}


	/* clear bit 1 of PCI config offset 0xf0 — NOTE(review): looks
	 * like it disables PCIe L1 entry; confirm against chipset spec */
	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	/*
	 * NOTE(review): HW_IF_CONFIG bits are written to CSR_SW_VER —
	 * presumably these register offsets alias on this chip; confirm
	 */
	tmp = IWK_READ(sc, CSR_SW_VER);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
	IWK_WRITE(sc, CSR_SW_VER, tmp);

	/* make sure power supply on each part of the hardware */
	iwk_mac_access_enter(sc);
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);
	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwk_mac_access_exit(sc);
	return (IWK_SUCCESS);
}
3345 
3346 /*
3347  * set up semphore flag to own EEPROM
3348  */
3349 static int iwk_eep_sem_down(iwk_sc_t *sc)
3350 {
3351 	int count1, count2;
3352 	uint32_t tmp;
3353 
3354 	for (count1 = 0; count1 < 1000; count1++) {
3355 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3356 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3357 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3358 
3359 		for (count2 = 0; count2 < 2; count2++) {
3360 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3361 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
3362 				return (IWK_SUCCESS);
3363 			DELAY(10000);
3364 		}
3365 	}
3366 	return (IWK_FAIL);
3367 }
3368 
3369 /*
3370  * reset semphore flag to release EEPROM
3371  */
3372 static void iwk_eep_sem_up(iwk_sc_t *sc)
3373 {
3374 	uint32_t tmp;
3375 
3376 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3377 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3378 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3379 }
3380 
3381 /*
3382  * This function load all infomation in eeprom into iwk_eep
3383  * structure in iwk_sc_t structure
3384  */
3385 static int iwk_eep_load(iwk_sc_t *sc)
3386 {
3387 	int i, rr;
3388 	uint32_t rv, tmp, eep_gp;
3389 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
3390 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
3391 
3392 	/* read eeprom gp register in CSR */
3393 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
3394 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
3395 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
3396 		IWK_DBG((IWK_DEBUG_EEPROM, "not find eeprom\n"));
3397 		return (IWK_FAIL);
3398 	}
3399 
3400 	rr = iwk_eep_sem_down(sc);
3401 	if (rr != 0) {
3402 		IWK_DBG((IWK_DEBUG_EEPROM, "driver failed to own EEPROM\n"));
3403 		return (IWK_FAIL);
3404 	}
3405 
3406 	for (addr = 0; addr < eep_sz; addr += 2) {
3407 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
3408 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
3409 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
3410 
3411 		for (i = 0; i < 10; i++) {
3412 			rv = IWK_READ(sc, CSR_EEPROM_REG);
3413 			if (rv & 1)
3414 				break;
3415 			DELAY(10);
3416 		}
3417 
3418 		if (!(rv & 1)) {
3419 			IWK_DBG((IWK_DEBUG_EEPROM,
3420 			    "time out when read eeprome\n"));
3421 			iwk_eep_sem_up(sc);
3422 			return (IWK_FAIL);
3423 		}
3424 
3425 		eep_p[addr/2] = rv >> 16;
3426 	}
3427 
3428 	iwk_eep_sem_up(sc);
3429 	return (IWK_SUCCESS);
3430 }
3431 
3432 /*
3433  * init mac address in ieee80211com_t struct
3434  */
3435 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3436 {
3437 	ieee80211com_t *ic = &sc->sc_ic;
3438 	struct iwk_eep *ep = &sc->sc_eep_map;
3439 
3440 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3441 
3442 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
3443 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
3444 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
3445 }
3446 
/*
 * Bring the adapter fully up: run the hardware pre-init, program the
 * Rx/Tx ring registers, enable interrupts, load and start the
 * firmware, wait for its init notification, then configure it via
 * iwk_config().  Returns IWK_SUCCESS or IWK_FAIL; sc_glock is taken
 * and released internally.
 */
static int
iwk_init(iwk_sc_t *sc)
{
	int qid, n, err;
	clock_t clk;
	uint32_t tmp;

	mutex_enter(&sc->sc_glock);
	sc->sc_flags &= ~IWK_F_FW_INIT;

	(void) iwk_preinit(sc);

	/* abort if the hardware RF-kill switch is engaged */
	tmp = IWK_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
		goto fail1;
	}

	/* init Rx ring: program descriptor/status base addresses */
	iwk_mac_access_enter(sc);
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwk_shared, val0)) >> 4));

	/* enable Rx DMA: 4K buffers, interrupt host on receive */
	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwk_mac_access_exit(sc);
	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/* init Tx rings */
	iwk_mac_access_enter(sc);
	iwk_reg_write(sc, SCD_TXFACT, 0);

	/* program the "keep warm" page address */
	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}
	iwk_mac_access_exit(sc);

	/* clear "radio off" and "disable command" bits */
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear any pending interrupts */
	IWK_WRITE(sc, CSR_INT, 0xffffffff);

	/* enable interrupts */
	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/* NOTE(review): RFKILL was already cleared above; these two
	 * duplicate writes look redundant — confirm before removing */
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* try the firmware load twice before giving up */
	for (n = 0; n < 2; n++) {
		/* load firmware init segment into NIC */
		err = iwk_load_firmware(sc);
		if (err != IWK_SUCCESS) {
			cmn_err(CE_WARN, "iwk_init(): "
			    "failed to setup boot firmware\n");
			continue;
		}

		/* now press "execute" start running */
		IWK_WRITE(sc, CSR_RESET, 0);
		break;
	}
	if (n == 2) {
		cmn_err(CE_WARN, "iwk_init(): " "failed to load firmware\n");
		goto fail1;
	}
	/* ..and wait at most two seconds for adapter to initialize */
	clk = ddi_get_lbolt() + drv_usectohz(2000000);
	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
		/* IWK_F_FW_INIT is set by the interrupt path; cv is
		 * signalled when the firmware-ready notification arrives */
		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
			break;
	}
	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
		cmn_err(CE_WARN,
		    "iwk_init(): timeout waiting for firmware init\n");
		goto fail1;
	}

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwk_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
		goto fail1;
	}

	/* at this point, hardware may receive beacons :) */
	mutex_exit(&sc->sc_glock);
	return (IWK_SUCCESS);

fail1:
	err = IWK_FAIL;
	mutex_exit(&sc->sc_glock);
	return (err);
}
3573 
/*
 * Shut the adapter down: reset the device, mask interrupts, drain
 * all Tx and Rx rings, stop the DMA master and issue a software
 * reset.  Takes and releases sc_glock.
 */
static void
iwk_stop(iwk_sc_t *sc)
{
	uint32_t tmp;
	int i;


	mutex_enter(&sc->sc_glock);

	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/* disable interrupts and acknowledge anything pending */
	IWK_WRITE(sc, CSR_INT_MASK, 0);
	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/* reset all Tx rings */
	for (i = 0; i < IWK_NUM_QUEUES; i++)
		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);

	/* reset Rx ring */
	iwk_reset_rx_ring(sc);

	/* release the DMA clock request made in iwk_preinit() */
	iwk_mac_access_enter(sc);
	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwk_mac_access_exit(sc);

	DELAY(5);

	iwk_stop_master(sc);

	/* clear the Tx watchdog and soft-reset the device */
	sc->sc_tx_timer = 0;
	tmp = IWK_READ(sc, CSR_RESET);
	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
	mutex_exit(&sc->sc_glock);
}
3609 
3610 /*
3611  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
3612  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
3613  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
3614  * INRIA Sophia - Projet Planete
3615  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
3616  */
3617 #define	is_success(amrr)	\
3618 	((amrr)->retrycnt < (amrr)->txcnt / 10)
3619 #define	is_failure(amrr)	\
3620 	((amrr)->retrycnt > (amrr)->txcnt / 3)
3621 #define	is_enough(amrr)		\
3622 	((amrr)->txcnt > 100)
3623 #define	is_min_rate(in)		\
3624 	((in)->in_txrate == 0)
3625 #define	is_max_rate(in)		\
3626 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
3627 #define	increase_rate(in)	\
3628 	((in)->in_txrate++)
3629 #define	decrease_rate(in)	\
3630 	((in)->in_txrate--)
3631 #define	reset_cnt(amrr)		\
3632 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
3633 
3634 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
3635 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
3636 
3637 static void
3638 iwk_amrr_init(iwk_amrr_t *amrr)
3639 {
3640 	amrr->success = 0;
3641 	amrr->recovery = 0;
3642 	amrr->txcnt = amrr->retrycnt = 0;
3643 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3644 }
3645 
3646 static void
3647 iwk_amrr_timeout(iwk_sc_t *sc)
3648 {
3649 	ieee80211com_t *ic = &sc->sc_ic;
3650 
3651 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
3652 	if (ic->ic_opmode == IEEE80211_M_STA)
3653 		iwk_amrr_ratectl(NULL, ic->ic_bss);
3654 	else
3655 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
3656 	sc->sc_clk = ddi_get_lbolt();
3657 }
3658 
3659 /* ARGSUSED */
3660 static void
3661 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
3662 {
3663 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
3664 	int need_change = 0;
3665 
3666 	if (is_success(amrr) && is_enough(amrr)) {
3667 		amrr->success++;
3668 		if (amrr->success >= amrr->success_threshold &&
3669 		    !is_max_rate(in)) {
3670 			amrr->recovery = 1;
3671 			amrr->success = 0;
3672 			increase_rate(in);
3673 			IWK_DBG((IWK_DEBUG_RATECTL,
3674 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
3675 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
3676 			need_change = 1;
3677 		} else {
3678 			amrr->recovery = 0;
3679 		}
3680 	} else if (is_failure(amrr)) {
3681 		amrr->success = 0;
3682 		if (!is_min_rate(in)) {
3683 			if (amrr->recovery) {
3684 				amrr->success_threshold++;
3685 				if (amrr->success_threshold >
3686 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
3687 					amrr->success_threshold =
3688 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
3689 			} else {
3690 				amrr->success_threshold =
3691 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3692 			}
3693 			decrease_rate(in);
3694 			IWK_DBG((IWK_DEBUG_RATECTL,
3695 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
3696 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
3697 			need_change = 1;
3698 		}
3699 		amrr->recovery = 0;	/* paper is incorrect */
3700 	}
3701 
3702 	if (is_enough(amrr) || need_change)
3703 		reset_cnt(amrr);
3704 }
3705