xref: /illumos-gate/usr/src/uts/common/io/iwk/iwk2.c (revision 2264ca7f5db194583c672cb5779a67f52bcd92a9)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
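/*
 * Note on usage (descriptive comment only): IWK_DBG() expands to a call to
 * iwk_dbg() in DEBUG builds, and the extra parentheses are required because
 * the macro takes a single parenthesized argument list, e.g.
 *
 *	IWK_DBG((IWK_DEBUG_SCAN, "scan channel %d\n", chan));
 *
 * Output is produced only for the categories enabled in iwk_dbg_flags,
 * which is zero by default, so nothing is printed unless the mask is
 * changed here or patched with a debugger.
 */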
88 
89 static void	*iwk_soft_state_p = NULL;
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * The hardware supports up to 4 segments per descriptor.
177  * For now, as in the other wifi drivers, the whole frame is put in a
178  * single descriptor, so the maximum number of segments is set to 1,
179  * the same as for rx_buffer. We may leverage the hardware's
180  * multi-segment ability in the future, which is why the rx and tx
181  * buffer_dma_attr are defined separately.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
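/*
 * Possible future direction only (not current behavior): if multi-segment
 * tx were enabled, dma_attr_sgllen above would be raised (up to the 4
 * segments the hardware supports per the comment above) and the tx path
 * changed to bind a frame with multiple cookies instead of copying it
 * into a single buffer.
 */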
197 
198 /* DMA attributes for the text and data parts of the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
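/*
 * Register mappings use DDI_STRUCTURE_LE_ACC so the DDI access routines
 * byte-swap the little-endian device registers as needed for the host,
 * while DMA memory uses DDI_NEVERSWAP_ACC and any required swapping is
 * done explicitly with the LE_32()-style macros when structures are
 * filled in.
 */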
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 static void	iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
312     struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp);
313 
314 static void	iwk_write_event_log(iwk_sc_t *);
315 static void	iwk_write_error_log(iwk_sc_t *);
316 
317 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
318 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
319 static int	iwk_quiesce(dev_info_t *dip);
320 
321 /*
322  * GLD specific operations
323  */
324 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
325 static int	iwk_m_start(void *arg);
326 static void	iwk_m_stop(void *arg);
327 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
328 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
329 static int	iwk_m_promisc(void *arg, boolean_t on);
330 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
331 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
332 static int	iwk_m_setprop(void *arg, const char *pr_name,
333 	mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
334 static int	iwk_m_getprop(void *arg, const char *pr_name,
335 	mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
336 	void *wldp_buf, uint_t *perm);
337 static void	iwk_destroy_locks(iwk_sc_t *sc);
338 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
339 static void	iwk_thread(iwk_sc_t *sc);
340 static void	iwk_watchdog(void *arg);
341 static int	iwk_run_state_config_ibss(ieee80211com_t *ic);
342 static int	iwk_run_state_config_sta(ieee80211com_t *ic);
343 static int	iwk_start_tx_beacon(ieee80211com_t *ic);
344 static int	iwk_clean_add_node_ibss(struct ieee80211com *ic,
345     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2);
346 
347 /*
348  * Supported rates for 802.11b/g modes (in 500Kbps unit).
349  * 11a and 11n support will be added later.
350  */
351 static const struct ieee80211_rateset iwk_rateset_11b =
352 	{ 4, { 2, 4, 11, 22 } };
353 
354 static const struct ieee80211_rateset iwk_rateset_11g =
355 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
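/*
 * The rate values above are in 500Kbps units, so 2 = 1Mbps, 22 = 11Mbps
 * and 108 = 54Mbps; the first field of each rateset is the rate count.
 */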
356 
357 /*
358  * For mfthread only
359  */
360 extern pri_t minclsyspri;
361 
362 #define	DRV_NAME_4965	"iwk"
363 
364 /*
365  * Module Loading Data & Entry Points
366  */
367 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
368     iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
369 
370 static struct modldrv iwk_modldrv = {
371 	&mod_driverops,
372 	"Intel(R) 4965AGN driver(N)",
373 	&iwk_devops
374 };
375 
376 static struct modlinkage iwk_modlinkage = {
377 	MODREV_1,
378 	&iwk_modldrv,
379 	NULL
380 };
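/*
 * _init()/_fini()/_info() below are the standard loadable module entry
 * points: _init() initializes the soft state framework, registers the
 * dev_ops with the MAC layer via mac_init_ops() and installs the module;
 * _fini() reverses this.
 */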
381 
382 int
383 _init(void)
384 {
385 	int	status;
386 
387 	status = ddi_soft_state_init(&iwk_soft_state_p,
388 	    sizeof (iwk_sc_t), 1);
389 	if (status != DDI_SUCCESS)
390 		return (status);
391 
392 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
393 	status = mod_install(&iwk_modlinkage);
394 	if (status != DDI_SUCCESS) {
395 		mac_fini_ops(&iwk_devops);
396 		ddi_soft_state_fini(&iwk_soft_state_p);
397 	}
398 
399 	return (status);
400 }
401 
402 int
403 _fini(void)
404 {
405 	int status;
406 
407 	status = mod_remove(&iwk_modlinkage);
408 	if (status == DDI_SUCCESS) {
409 		mac_fini_ops(&iwk_devops);
410 		ddi_soft_state_fini(&iwk_soft_state_p);
411 	}
412 
413 	return (status);
414 }
415 
416 int
417 _info(struct modinfo *mip)
418 {
419 	return (mod_info(&iwk_modlinkage, mip));
420 }
421 
422 /*
423  * Mac Call Back entries
424  */
425 mac_callbacks_t	iwk_m_callbacks = {
426 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
427 	iwk_m_stat,
428 	iwk_m_start,
429 	iwk_m_stop,
430 	iwk_m_promisc,
431 	iwk_m_multicst,
432 	iwk_m_unicst,
433 	iwk_m_tx,
434 	iwk_m_ioctl,
435 	NULL,
436 	NULL,
437 	NULL,
438 	iwk_m_setprop,
439 	iwk_m_getprop
440 };
441 
442 #ifdef DEBUG
443 void
444 iwk_dbg(uint32_t flags, const char *fmt, ...)
445 {
446 	va_list	ap;
447 
448 	if (flags & iwk_dbg_flags) {
449 		va_start(ap, fmt);
450 		vcmn_err(CE_NOTE, fmt, ap);
451 		va_end(ap);
452 	}
453 }
454 #endif
455 
456 /*
457  * device operations
458  */
459 int
460 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
461 {
462 	iwk_sc_t		*sc;
463 	ieee80211com_t	*ic;
464 	int			instance, err, i;
465 	char			strbuf[32];
466 	wifi_data_t		wd = { 0 };
467 	mac_register_t		*macp;
468 
469 	int			intr_type;
470 	int			intr_count;
471 	int			intr_actual;
472 
473 	switch (cmd) {
474 	case DDI_ATTACH:
475 		break;
476 	case DDI_RESUME:
477 		sc = ddi_get_soft_state(iwk_soft_state_p,
478 		    ddi_get_instance(dip));
479 		ASSERT(sc != NULL);
480 
481 		mutex_enter(&sc->sc_glock);
482 		sc->sc_flags &= ~IWK_F_SUSPEND;
483 		mutex_exit(&sc->sc_glock);
484 
485 		if (sc->sc_flags & IWK_F_RUNNING)
486 			(void) iwk_init(sc);
487 
488 		mutex_enter(&sc->sc_glock);
489 		sc->sc_flags |= IWK_F_LAZY_RESUME;
490 		mutex_exit(&sc->sc_glock);
491 
492 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
493 		return (DDI_SUCCESS);
494 	default:
495 		err = DDI_FAILURE;
496 		goto attach_fail1;
497 	}
498 
499 	instance = ddi_get_instance(dip);
500 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
501 	if (err != DDI_SUCCESS) {
502 		cmn_err(CE_WARN,
503 		    "iwk_attach(): failed to allocate soft state\n");
504 		goto attach_fail1;
505 	}
506 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
507 	sc->sc_dip = dip;
508 
509 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
510 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
511 	if (err != DDI_SUCCESS) {
512 		cmn_err(CE_WARN,
513 		    "iwk_attach(): failed to map config space regs\n");
514 		goto attach_fail2;
515 	}
516 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
517 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
518 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
519 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
520 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
521 	if (!sc->sc_clsz)
522 		sc->sc_clsz = 16;
523 	sc->sc_clsz = (sc->sc_clsz << 2);
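	/*
	 * sc_dmabuf_sz is the size used for rx/tx frame buffers: 0x1000
	 * bytes plus a maximum-sized 802.11 frame (header, MTU, CRC and
	 * WEP overhead), rounded up to the cache line size computed above.
	 */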
524 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
525 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
526 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
527 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
528 	/*
529 	 * Map operating registers
530 	 */
531 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
532 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
533 	if (err != DDI_SUCCESS) {
534 		cmn_err(CE_WARN,
535 		    "iwk_attach(): failed to map device regs\n");
536 		goto attach_fail2a;
537 	}
538 
539 	err = ddi_intr_get_supported_types(dip, &intr_type);
540 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
541 		cmn_err(CE_WARN, "iwk_attach(): "
542 		    "Fixed type interrupt is not supported\n");
543 		goto attach_fail_intr_a;
544 	}
545 
546 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
547 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
548 		cmn_err(CE_WARN, "iwk_attach(): "
549 		    "No fixed interrupts\n");
550 		goto attach_fail_intr_a;
551 	}
552 
553 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
554 
555 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
556 	    intr_count, &intr_actual, 0);
557 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
558 		cmn_err(CE_WARN, "iwk_attach(): "
559 		    "ddi_intr_alloc() failed 0x%x\n", err);
560 		goto attach_fail_intr_b;
561 	}
562 
563 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
564 	if (err != DDI_SUCCESS) {
565 		cmn_err(CE_WARN, "iwk_attach(): "
566 		    "ddi_intr_get_pri() failed 0x%x\n", err);
567 		goto attach_fail_intr_c;
568 	}
569 
570 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
571 	    DDI_INTR_PRI(sc->sc_intr_pri));
572 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
573 	    DDI_INTR_PRI(sc->sc_intr_pri));
574 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
575 	    DDI_INTR_PRI(sc->sc_intr_pri));
576 	mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER,
577 	    DDI_INTR_PRI(sc->sc_intr_pri));
578 
579 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
580 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
581 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
582 	/*
583 	 * initialize the mfthread
584 	 */
585 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
586 	sc->sc_mf_thread = NULL;
587 	sc->sc_mf_thread_switch = 0;
588 
589 	/*
590 	 * Allocate shared page.
591 	 */
592 	err = iwk_alloc_shared(sc);
593 	if (err != DDI_SUCCESS) {
594 		cmn_err(CE_WARN, "iwk_attach(): "
595 		    "failed to allocate shared page\n");
596 		goto attach_fail3;
597 	}
598 
599 	/*
600 	 * Allocate keep warm page.
601 	 */
602 	err = iwk_alloc_kw(sc);
603 	if (err != DDI_SUCCESS) {
604 		cmn_err(CE_WARN, "iwk_attach(): "
605 		    "failed to allocate keep warm page\n");
606 		goto attach_fail3a;
607 	}
608 
609 	/*
610 	 * Do some necessary hardware initializations.
611 	 */
612 	err = iwk_preinit(sc);
613 	if (err != DDI_SUCCESS) {
614 		cmn_err(CE_WARN, "iwk_attach(): "
615 		    "failed to init hardware\n");
616 		goto attach_fail4;
617 	}
618 
619 	/* initialize EEPROM */
620 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
621 	if (err != 0) {
622 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
623 		goto attach_fail4;
624 	}
625 
626 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
627 		cmn_err(CE_WARN, "older EEPROM detected\n");
628 		goto attach_fail4;
629 	}
630 
631 	iwk_get_mac_from_eep(sc);
632 
633 	err = iwk_ring_init(sc);
634 	if (err != DDI_SUCCESS) {
635 		cmn_err(CE_WARN, "iwk_attach(): "
636 		    "failed to allocate and initialize ring\n");
637 		goto attach_fail4;
638 	}
639 
640 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
641 
642 	err = iwk_alloc_fw_dma(sc);
643 	if (err != DDI_SUCCESS) {
644 		cmn_err(CE_WARN, "iwk_attach(): "
645 		    "failed to allocate firmware dma\n");
646 		goto attach_fail5;
647 	}
648 
649 	/*
650 	 * Initialize the wifi part, which is used by the
651 	 * net80211 generic layer
652 	 */
653 	ic = &sc->sc_ic;
654 	ic->ic_phytype  = IEEE80211_T_OFDM;
655 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
656 	ic->ic_state    = IEEE80211_S_INIT;
657 	ic->ic_maxrssi  = 100; /* experimental number */
658 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
659 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
660 	/*
661 	 * WEP and TKIP are done in software; CCMP is done in hardware.
662 	 */
663 	ic->ic_caps |= IEEE80211_C_AES_CCM;
664 	/*
665 	 * Support WPA/WPA2
666 	 */
667 	ic->ic_caps |= IEEE80211_C_WPA;
668 	/*
669 	 * support Adhoc mode
670 	 */
671 	ic->ic_caps |= IEEE80211_C_IBSS;
672 
673 	/* set supported .11b and .11g rates */
674 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
675 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
676 
677 	/* set supported .11b and .11g channels (1 through 11) */
678 	for (i = 1; i <= 11; i++) {
679 		ic->ic_sup_channels[i].ich_freq =
680 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
681 		ic->ic_sup_channels[i].ich_flags =
682 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
683 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
684 		    IEEE80211_CHAN_PASSIVE;
685 	}
686 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
687 
688 	ic->ic_xmit = iwk_send;
689 	/*
690 	 * init Wifi layer
691 	 */
692 	ieee80211_attach(ic);
693 
694 	/*
695 	 * each instance has its own WPA door
696 	 */
697 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
698 	    ddi_driver_name(dip),
699 	    ddi_get_instance(dip));
700 
701 	/*
702 	 * Override 80211 default routines
703 	 */
704 	sc->sc_newstate = ic->ic_newstate;
705 	ic->ic_newstate = iwk_newstate;
706 	ic->ic_watchdog = iwk_watchdog;
707 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
708 	ic->ic_recv_mgmt = iwk_recv_mgmt;
709 	ic->ic_node_alloc = iwk_node_alloc;
710 	ic->ic_node_free = iwk_node_free;
711 	ic->ic_crypto.cs_key_set = iwk_key_set;
712 	ieee80211_media_init(ic);
713 	/*
714 	 * initialize default tx key
715 	 */
716 	ic->ic_def_txkey = 0;
717 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
718 	    iwk_rx_softintr, (caddr_t)sc);
719 	if (err != DDI_SUCCESS) {
720 		cmn_err(CE_WARN, "iwk_attach(): "
721 		    "add soft interrupt failed\n");
722 		goto attach_fail7;
723 	}
724 
725 	/*
726 	 * Add the interrupt handler
727 	 */
728 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
729 	    (caddr_t)sc, NULL);
730 	if (err != DDI_SUCCESS) {
731 		cmn_err(CE_WARN, "iwk_attach(): "
732 		    "ddi_intr_add_handler() failed\n");
733 		goto attach_fail8;
734 	}
735 
736 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
737 	if (err != DDI_SUCCESS) {
738 		cmn_err(CE_WARN, "iwk_attach(): "
739 		    "ddi_intr_enable() failed\n");
740 		goto attach_fail_intr_d;
741 	}
742 
743 	/*
744 	 * Initialize the wifi plugin data passed to the MAC layer
745 	 */
746 	wd.wd_secalloc = WIFI_SEC_NONE;
747 	wd.wd_opmode = ic->ic_opmode;
748 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
749 
750 	macp = mac_alloc(MAC_VERSION);
751 	if (macp == NULL) {
752 		cmn_err(CE_WARN,
753 		    "iwk_attach(): failed to do mac_alloc()\n");
754 		goto attach_fail9;
755 	}
756 
757 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
758 	macp->m_driver		= sc;
759 	macp->m_dip		= dip;
760 	macp->m_src_addr	= ic->ic_macaddr;
761 	macp->m_callbacks	= &iwk_m_callbacks;
762 	macp->m_min_sdu		= 0;
763 	macp->m_max_sdu		= IEEE80211_MTU;
764 	macp->m_pdata		= &wd;
765 	macp->m_pdata_size	= sizeof (wd);
766 
767 	/*
768 	 * Register the macp to mac
769 	 */
770 	err = mac_register(macp, &ic->ic_mach);
771 	mac_free(macp);
772 	if (err != DDI_SUCCESS) {
773 		cmn_err(CE_WARN,
774 		    "iwk_attach(): failed to do mac_register()\n");
775 		goto attach_fail9;
776 	}
777 
778 	/*
779 	 * Create minor node of type DDI_NT_NET_WIFI
780 	 */
781 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
782 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
783 	    instance + 1, DDI_NT_NET_WIFI, 0);
784 	if (err != DDI_SUCCESS)
785 		cmn_err(CE_WARN,
786 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
787 
788 	/*
789 	 * Notify link is down now
790 	 */
791 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
792 
793 	/*
794 	 * create the mf thread to handle link status updates,
795 	 * fatal error recovery, etc.
796 	 */
797 	sc->sc_mf_thread_switch = 1;
798 	if (sc->sc_mf_thread == NULL)
799 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
800 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
801 
802 	sc->sc_flags |= IWK_F_ATTACHED;
803 
804 	return (DDI_SUCCESS);
805 attach_fail9:
806 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
807 attach_fail_intr_d:
808 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
809 
810 attach_fail8:
811 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
812 	sc->sc_soft_hdl = NULL;
813 attach_fail7:
814 	ieee80211_detach(ic);
815 attach_fail6:
816 	iwk_free_fw_dma(sc);
817 attach_fail5:
818 	iwk_ring_free(sc);
819 attach_fail4:
820 	iwk_free_kw(sc);
821 attach_fail3a:
822 	iwk_free_shared(sc);
823 attach_fail3:
824 	iwk_destroy_locks(sc);
825 attach_fail_intr_c:
826 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
827 attach_fail_intr_b:
828 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
829 attach_fail_intr_a:
830 	ddi_regs_map_free(&sc->sc_handle);
831 attach_fail2a:
832 	ddi_regs_map_free(&sc->sc_cfg_handle);
833 attach_fail2:
834 	ddi_soft_state_free(iwk_soft_state_p, instance);
835 attach_fail1:
836 	return (err);
837 }
838 
839 int
840 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
841 {
842 	iwk_sc_t	*sc;
843 	int err;
844 
845 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
846 	ASSERT(sc != NULL);
847 
848 	switch (cmd) {
849 	case DDI_DETACH:
850 		break;
851 	case DDI_SUSPEND:
852 		mutex_enter(&sc->sc_glock);
853 		sc->sc_flags |= IWK_F_SUSPEND;
854 		mutex_exit(&sc->sc_glock);
855 		if (sc->sc_flags & IWK_F_RUNNING) {
856 			iwk_stop(sc);
857 		}
858 
859 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
860 		return (DDI_SUCCESS);
861 	default:
862 		return (DDI_FAILURE);
863 	}
864 
865 	if (!(sc->sc_flags & IWK_F_ATTACHED))
866 		return (DDI_FAILURE);
867 
868 	err = mac_disable(sc->sc_ic.ic_mach);
869 	if (err != DDI_SUCCESS)
870 		return (err);
871 
872 	/*
873 	 * Destroy the mf_thread
874 	 */
875 	mutex_enter(&sc->sc_mt_lock);
876 	sc->sc_mf_thread_switch = 0;
877 	while (sc->sc_mf_thread != NULL) {
878 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
879 			break;
880 	}
881 	mutex_exit(&sc->sc_mt_lock);
882 
883 	iwk_stop(sc);
884 	DELAY(500000);
885 
886 	/*
887 	 * Unregister from the MAC layer subsystem
888 	 */
889 	(void) mac_unregister(sc->sc_ic.ic_mach);
890 
891 	mutex_enter(&sc->sc_glock);
892 	iwk_free_fw_dma(sc);
893 	iwk_ring_free(sc);
894 	iwk_free_kw(sc);
895 	iwk_free_shared(sc);
896 	mutex_exit(&sc->sc_glock);
897 
898 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
899 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
900 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
901 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
902 
903 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
904 	sc->sc_soft_hdl = NULL;
905 
906 	/*
907 	 * detach ieee80211
908 	 */
909 	ieee80211_detach(&sc->sc_ic);
910 
911 	iwk_destroy_locks(sc);
912 
913 	ddi_regs_map_free(&sc->sc_handle);
914 	ddi_regs_map_free(&sc->sc_cfg_handle);
915 	ddi_remove_minor_node(dip, NULL);
916 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
917 
918 	return (DDI_SUCCESS);
919 }
920 
921 /*
922  * quiesce(9E) entry point.
923  *
924  * This function is called when the system is single-threaded at high
925  * PIL with preemption disabled. Therefore, this function must not be
926  * blocked.
927  *
928  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
929  * DDI_FAILURE indicates an error condition and should almost never happen.
930  */
931 int
932 iwk_quiesce(dev_info_t *dip)
933 {
934 	iwk_sc_t	*sc;
935 
936 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
937 	ASSERT(sc != NULL);
938 
939 	/* no message prints and no lock acquisition */
940 #ifdef DEBUG
941 	iwk_dbg_flags = 0;
942 #endif
943 	sc->sc_flags |= IWK_F_QUIESCED;
944 
945 	iwk_stop(sc);
946 
947 	return (DDI_SUCCESS);
948 }
949 
950 static void
951 iwk_destroy_locks(iwk_sc_t *sc)
952 {
953 	cv_destroy(&sc->sc_mt_cv);
954 	mutex_destroy(&sc->sc_mt_lock);
955 	cv_destroy(&sc->sc_tx_cv);
956 	cv_destroy(&sc->sc_cmd_cv);
957 	cv_destroy(&sc->sc_fw_cv);
958 	mutex_destroy(&sc->sc_tx_lock);
959 	mutex_destroy(&sc->sc_glock);
960 }
961 
962 /*
963  * Allocate an area of memory and a DMA handle for accessing it
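 *
 * This follows the usual three-step DDI sequence: ddi_dma_alloc_handle(),
 * ddi_dma_mem_alloc() and ddi_dma_addr_bind_handle(); on any failure the
 * resources already allocated are released and DDI_FAILURE is returned.
 * The caller supplies the DMA attributes, access attributes and the
 * dma_flags used for the bind.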
964  */
965 static int
966 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
967     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
968     uint_t dma_flags, iwk_dma_t *dma_p)
969 {
970 	caddr_t vaddr;
971 	int err;
972 
973 	/*
974 	 * Allocate handle
975 	 */
976 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
977 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
978 	if (err != DDI_SUCCESS) {
979 		dma_p->dma_hdl = NULL;
980 		return (DDI_FAILURE);
981 	}
982 
983 	/*
984 	 * Allocate memory
985 	 */
986 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
987 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
988 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
989 	if (err != DDI_SUCCESS) {
990 		ddi_dma_free_handle(&dma_p->dma_hdl);
991 		dma_p->dma_hdl = NULL;
992 		dma_p->acc_hdl = NULL;
993 		return (DDI_FAILURE);
994 	}
995 
996 	/*
997 	 * Bind the two together
998 	 */
999 	dma_p->mem_va = vaddr;
1000 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1001 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1002 	    &dma_p->cookie, &dma_p->ncookies);
1003 	if (err != DDI_DMA_MAPPED) {
1004 		ddi_dma_mem_free(&dma_p->acc_hdl);
1005 		ddi_dma_free_handle(&dma_p->dma_hdl);
1006 		dma_p->acc_hdl = NULL;
1007 		dma_p->dma_hdl = NULL;
1008 		return (DDI_FAILURE);
1009 	}
1010 
1011 	dma_p->nslots = ~0U;
1012 	dma_p->size = ~0U;
1013 	dma_p->token = ~0U;
1014 	dma_p->offset = 0;
1015 	return (DDI_SUCCESS);
1016 }
1017 
1018 /*
1019  * Free one allocated area of DMAable memory
1020  */
1021 static void
1022 iwk_free_dma_mem(iwk_dma_t *dma_p)
1023 {
1024 	if (dma_p->dma_hdl != NULL) {
1025 		if (dma_p->ncookies) {
1026 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1027 			dma_p->ncookies = 0;
1028 		}
1029 		ddi_dma_free_handle(&dma_p->dma_hdl);
1030 		dma_p->dma_hdl = NULL;
1031 	}
1032 
1033 	if (dma_p->acc_hdl != NULL) {
1034 		ddi_dma_mem_free(&dma_p->acc_hdl);
1035 		dma_p->acc_hdl = NULL;
1036 	}
1037 }
1038 
1039 /*
1040  * Allocate DMA memory for the firmware sections and copy the image in.
1041  */
1042 static int
1043 iwk_alloc_fw_dma(iwk_sc_t *sc)
1044 {
1045 	int err = DDI_SUCCESS;
1046 	iwk_dma_t *dma_p;
1047 	char *t;
1048 
1049 	/*
1050 	 * firmware image layout:
1051 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
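	 *
	 * t below starts just past the header and is advanced by each
	 * section's size; the sizes are little-endian fields in the
	 * header, hence the LE_32() conversions.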
1052 	 */
1053 	t = (char *)(sc->sc_hdr + 1);
1054 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1055 	    &fw_dma_attr, &iwk_dma_accattr,
1056 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1057 	    &sc->sc_dma_fw_text);
1058 	dma_p = &sc->sc_dma_fw_text;
1059 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1060 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1061 	    dma_p->cookie.dmac_size));
1062 	if (err != DDI_SUCCESS) {
1063 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1064 		    " text dma memory");
1065 		goto fail;
1066 	}
1067 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1068 
1069 	t += LE_32(sc->sc_hdr->textsz);
1070 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1071 	    &fw_dma_attr, &iwk_dma_accattr,
1072 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1073 	    &sc->sc_dma_fw_data);
1074 	dma_p = &sc->sc_dma_fw_data;
1075 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1076 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1077 	    dma_p->cookie.dmac_size));
1078 	if (err != DDI_SUCCESS) {
1079 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1080 		    " data dma memory");
1081 		goto fail;
1082 	}
1083 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1084 
1085 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1086 	    &fw_dma_attr, &iwk_dma_accattr,
1087 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1088 	    &sc->sc_dma_fw_data_bak);
1089 	dma_p = &sc->sc_dma_fw_data_bak;
1090 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1091 	    "size:%lx]\n",
1092 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1093 	    dma_p->cookie.dmac_size));
1094 	if (err != DDI_SUCCESS) {
1095 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1096 		    " data backup dma memory");
1097 		goto fail;
1098 	}
1099 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1100 
1101 	t += LE_32(sc->sc_hdr->datasz);
1102 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1103 	    &fw_dma_attr, &iwk_dma_accattr,
1104 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1105 	    &sc->sc_dma_fw_init_text);
1106 	dma_p = &sc->sc_dma_fw_init_text;
1107 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1108 	    "size:%lx]\n",
1109 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1110 	    dma_p->cookie.dmac_size));
1111 	if (err != DDI_SUCCESS) {
1112 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1113 		    " init text dma memory");
1114 		goto fail;
1115 	}
1116 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1117 
1118 	t += LE_32(sc->sc_hdr->init_textsz);
1119 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1120 	    &fw_dma_attr, &iwk_dma_accattr,
1121 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1122 	    &sc->sc_dma_fw_init_data);
1123 	dma_p = &sc->sc_dma_fw_init_data;
1124 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1125 	    "size:%lx]\n",
1126 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1127 	    dma_p->cookie.dmac_size));
1128 	if (err != DDI_SUCCESS) {
1129 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1130 		    " init data dma memory");
1131 		goto fail;
1132 	}
1133 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1134 
1135 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1136 fail:
1137 	return (err);
1138 }
1139 
1140 static void
1141 iwk_free_fw_dma(iwk_sc_t *sc)
1142 {
1143 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1144 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1145 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1146 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1147 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1148 }
1149 
1150 /*
1151  * Allocate a shared page between host and NIC.
1152  */
1153 static int
1154 iwk_alloc_shared(iwk_sc_t *sc)
1155 {
1156 	iwk_dma_t *dma_p;
1157 	int err = DDI_SUCCESS;
1158 
1159 	/* must be aligned on a 4K-page boundary */
1160 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1161 	    &sh_dma_attr, &iwk_dma_accattr,
1162 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1163 	    &sc->sc_dma_sh);
1164 	if (err != DDI_SUCCESS)
1165 		goto fail;
1166 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1167 
1168 	dma_p = &sc->sc_dma_sh;
1169 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1170 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1171 	    dma_p->cookie.dmac_size));
1172 
1173 	return (err);
1174 fail:
1175 	iwk_free_shared(sc);
1176 	return (err);
1177 }
1178 
1179 static void
1180 iwk_free_shared(iwk_sc_t *sc)
1181 {
1182 	iwk_free_dma_mem(&sc->sc_dma_sh);
1183 }
1184 
1185 /*
1186  * Allocate a keep warm page.
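 * (The "keep warm" buffer is a scratch page the device DMAs to; it is
 * believed to keep the card's DRAM/bus interface active so that real
 * DMA accesses do not stall.)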
1187  */
1188 static int
1189 iwk_alloc_kw(iwk_sc_t *sc)
1190 {
1191 	iwk_dma_t *dma_p;
1192 	int err = DDI_SUCCESS;
1193 
1194 	/* must be aligned on a 4K-page boundary */
1195 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1196 	    &kw_dma_attr, &iwk_dma_accattr,
1197 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1198 	    &sc->sc_dma_kw);
1199 	if (err != DDI_SUCCESS)
1200 		goto fail;
1201 
1202 	dma_p = &sc->sc_dma_kw;
1203 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1204 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1205 	    dma_p->cookie.dmac_size));
1206 
1207 	return (err);
1208 fail:
1209 	iwk_free_kw(sc);
1210 	return (err);
1211 }
1212 
1213 static void
1214 iwk_free_kw(iwk_sc_t *sc)
1215 {
1216 	iwk_free_dma_mem(&sc->sc_dma_kw);
1217 }
1218 
1219 static int
1220 iwk_alloc_rx_ring(iwk_sc_t *sc)
1221 {
1222 	iwk_rx_ring_t *ring;
1223 	iwk_rx_data_t *data;
1224 	iwk_dma_t *dma_p;
1225 	int i, err = DDI_SUCCESS;
1226 
1227 	ring = &sc->sc_rxq;
1228 	ring->cur = 0;
1229 
1230 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1231 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1232 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1233 	    &ring->dma_desc);
1234 	if (err != DDI_SUCCESS) {
1235 		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1236 		goto fail;
1237 	}
1238 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1239 	dma_p = &ring->dma_desc;
1240 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1241 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1242 	    dma_p->cookie.dmac_size));
1243 
1244 	/*
1245 	 * Allocate Rx buffers.
1246 	 */
1247 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1248 		data = &ring->data[i];
1249 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1250 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1251 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1252 		    &data->dma_data);
1253 		if (err != DDI_SUCCESS) {
1254 			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1255 			    "failed\n", i);
1256 			goto fail;
1257 		}
1258 		/*
1259 		 * Physical address bits [35:8] are written to the descriptor
1260 		 * (hence the >> 8), unlike bits [31:0] on the 3945.
1261 		 */
1262 		ring->desc[i] = LE_32((uint32_t)
1263 		    (data->dma_data.cookie.dmac_address >> 8));
1264 	}
1265 	dma_p = &ring->data[0].dma_data;
1266 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1267 	    "size:%lx]\n",
1268 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1269 	    dma_p->cookie.dmac_size));
1270 
1271 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1272 
1273 	return (err);
1274 
1275 fail:
1276 	iwk_free_rx_ring(sc);
1277 	return (err);
1278 }
1279 
1280 static void
1281 iwk_reset_rx_ring(iwk_sc_t *sc)
1282 {
1283 	int n;
1284 
1285 	iwk_mac_access_enter(sc);
1286 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1287 	for (n = 0; n < 2000; n++) {
1288 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1289 			break;
1290 		DELAY(1000);
1291 	}
1292 
1293 	if (n == 2000)
1294 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1295 
1296 	iwk_mac_access_exit(sc);
1297 
1298 	sc->sc_rxq.cur = 0;
1299 }
1300 
1301 static void
1302 iwk_free_rx_ring(iwk_sc_t *sc)
1303 {
1304 	int i;
1305 
1306 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1307 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1308 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1309 			    DDI_DMA_SYNC_FORCPU);
1310 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1311 	}
1312 
1313 	if (sc->sc_rxq.dma_desc.dma_hdl)
1314 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1315 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1316 }
1317 
1318 static int
1319 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1320     int slots, int qid)
1321 {
1322 	iwk_tx_data_t *data;
1323 	iwk_tx_desc_t *desc_h;
1324 	uint32_t paddr_desc_h;
1325 	iwk_cmd_t *cmd_h;
1326 	uint32_t paddr_cmd_h;
1327 	iwk_dma_t *dma_p;
1328 	int i, err = DDI_SUCCESS;
1329 
1330 	ring->qid = qid;
1331 	ring->count = TFD_QUEUE_SIZE_MAX;
1332 	ring->window = slots;
1333 	ring->queued = 0;
1334 	ring->cur = 0;
1335 
1336 	err = iwk_alloc_dma_mem(sc,
1337 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1338 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1339 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1340 	    &ring->dma_desc);
1341 	if (err != DDI_SUCCESS) {
1342 		cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1343 		    "failed\n", qid);
1344 		goto fail;
1345 	}
1346 	dma_p = &ring->dma_desc;
1347 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1348 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1349 	    dma_p->cookie.dmac_size));
1350 
1351 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1352 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1353 
1354 	err = iwk_alloc_dma_mem(sc,
1355 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1356 	    &cmd_dma_attr, &iwk_dma_accattr,
1357 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1358 	    &ring->dma_cmd);
1359 	if (err != DDI_SUCCESS) {
1360 		cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1361 		    "failed\n", qid);
1362 		goto fail;
1363 	}
1364 	dma_p = &ring->dma_cmd;
1365 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1366 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1367 	    dma_p->cookie.dmac_size));
1368 
1369 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1370 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1371 
1372 	/*
1373 	 * Allocate Tx buffers.
1374 	 */
1375 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1376 	    KM_NOSLEEP);
1377 	if (ring->data == NULL) {
1378 		cmn_err(CE_WARN, "could not allocate tx data slots\n");
1379 		goto fail;
1380 	}
1381 
1382 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1383 		data = &ring->data[i];
1384 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1385 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1386 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1387 		    &data->dma_data);
1388 		if (err != DDI_SUCCESS) {
1389 			cmn_err(CE_WARN, "dma alloc tx ring "
1390 			    "buf[%d] failed\n", i);
1391 			goto fail;
1392 		}
1393 
1394 		data->desc = desc_h + i;
1395 		data->paddr_desc = paddr_desc_h +
1396 		    _PTRDIFF(data->desc, desc_h);
1397 		data->cmd = cmd_h +  i; /* (i % slots); */
1398 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1399 		data->paddr_cmd = paddr_cmd_h +
1400 		    _PTRDIFF(data->cmd, cmd_h);
1401 	}
1402 	dma_p = &ring->data[0].dma_data;
1403 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1404 	    "size:%lx]\n",
1405 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1406 	    dma_p->cookie.dmac_size));
1407 
1408 	return (err);
1409 
1410 fail:
1411 	if (ring->data)
1412 		kmem_free(ring->data,
1413 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1414 	iwk_free_tx_ring(sc, ring);
1415 	return (err);
1416 }
1417 
1418 static void
1419 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1420 {
1421 	iwk_tx_data_t *data;
1422 	int i, n;
1423 
1424 	iwk_mac_access_enter(sc);
1425 
1426 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1427 	for (n = 0; n < 200; n++) {
1428 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1429 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1430 			break;
1431 		DELAY(10);
1432 	}
1433 	if (n == 200) {
1434 		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1435 		    ring->qid));
1436 	}
1437 	iwk_mac_access_exit(sc);
1438 
1439 	for (i = 0; i < ring->count; i++) {
1440 		data = &ring->data[i];
1441 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1442 	}
1443 
1444 	ring->queued = 0;
1445 	ring->cur = 0;
1446 }
1447 
1448 /*ARGSUSED*/
1449 static void
1450 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1451 {
1452 	int i;
1453 
1454 	if (ring->dma_desc.dma_hdl != NULL)
1455 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1456 	iwk_free_dma_mem(&ring->dma_desc);
1457 
1458 	if (ring->dma_cmd.dma_hdl != NULL)
1459 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1460 	iwk_free_dma_mem(&ring->dma_cmd);
1461 
1462 	if (ring->data != NULL) {
1463 		for (i = 0; i < ring->count; i++) {
1464 			if (ring->data[i].dma_data.dma_hdl)
1465 				IWK_DMA_SYNC(ring->data[i].dma_data,
1466 				    DDI_DMA_SYNC_FORDEV);
1467 			iwk_free_dma_mem(&ring->data[i].dma_data);
1468 		}
1469 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1470 	}
1471 }
1472 
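/*
 * Allocate all tx rings: the data queues (TFD_TX_CMD_SLOTS slots each),
 * the command queue (TFD_CMD_SLOTS slots), and then the rx ring.
 */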
1473 static int
1474 iwk_ring_init(iwk_sc_t *sc)
1475 {
1476 	int i, err = DDI_SUCCESS;
1477 
1478 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1479 		if (i == IWK_CMD_QUEUE_NUM)
1480 			continue;
1481 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1482 		    i);
1483 		if (err != DDI_SUCCESS)
1484 			goto fail;
1485 	}
1486 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1487 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1488 	if (err != DDI_SUCCESS)
1489 		goto fail;
1490 	err = iwk_alloc_rx_ring(sc);
1491 	if (err != DDI_SUCCESS)
1492 		goto fail;
1493 	return (err);
1494 
1495 fail:
1496 	return (err);
1497 }
1498 
1499 static void
1500 iwk_ring_free(iwk_sc_t *sc)
1501 {
1502 	int i = IWK_NUM_QUEUES;
1503 
1504 	iwk_free_rx_ring(sc);
1505 	while (--i >= 0) {
1506 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1507 	}
1508 }
1509 
1510 /* ARGSUSED */
1511 static ieee80211_node_t *
1512 iwk_node_alloc(ieee80211com_t *ic)
1513 {
1514 	iwk_amrr_t *amrr;
1515 
1516 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1517 	if (amrr != NULL)
1518 		iwk_amrr_init(amrr);
1519 	return (&amrr->in);
1520 }
1521 
1522 static void
1523 iwk_node_free(ieee80211_node_t *in)
1524 {
1525 	ieee80211com_t *ic = in->in_ic;
1526 
1527 	ic->ic_node_cleanup(in);
1528 	if (in->in_wpa_ie != NULL)
1529 		ieee80211_free(in->in_wpa_ie);
1530 	kmem_free(in, sizeof (iwk_amrr_t));
1531 }
1532 
1533 /*ARGSUSED*/
1534 static int
1535 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1536 {
1537 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1538 	ieee80211_node_t *in = ic->ic_bss;
1539 	enum ieee80211_state ostate = ic->ic_state;
1540 	int i, err = IWK_SUCCESS;
1541 
1542 	mutex_enter(&sc->sc_glock);
1543 	switch (nstate) {
1544 	case IEEE80211_S_SCAN:
1545 		switch (ostate) {
1546 		case IEEE80211_S_INIT:
1547 		{
1548 			iwk_add_sta_t node;
1549 
1550 			sc->sc_flags |= IWK_F_SCANNING;
1551 			sc->sc_scan_pending = 0;
1552 			iwk_set_led(sc, 2, 10, 2);
1553 
1554 			/*
1555 			 * clear association to receive beacons from
1556 			 * all BSSes
1557 			 */
1558 			sc->sc_config.assoc_id = 0;
1559 			sc->sc_config.filter_flags &=
1560 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1561 
1562 			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1563 			    "flags %x filter_flags %x\n", sc->sc_config.chan,
1564 			    sc->sc_config.flags, sc->sc_config.filter_flags));
1565 
1566 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1567 			    sizeof (iwk_rxon_cmd_t), 1);
1568 			if (err != IWK_SUCCESS) {
1569 				cmn_err(CE_WARN,
1570 				    "could not clear association\n");
1571 				sc->sc_flags &= ~IWK_F_SCANNING;
1572 				mutex_exit(&sc->sc_glock);
1573 				return (err);
1574 			}
1575 
1576 			/* add broadcast node to send probe request */
1577 			(void) memset(&node, 0, sizeof (node));
1578 			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1579 			node.id = IWK_BROADCAST_ID;
1580 			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1581 			    sizeof (node), 1);
1582 			if (err != IWK_SUCCESS) {
1583 				cmn_err(CE_WARN, "could not add "
1584 				    "broadcast node\n");
1585 				sc->sc_flags &= ~IWK_F_SCANNING;
1586 				mutex_exit(&sc->sc_glock);
1587 				return (err);
1588 			}
1589 			break;
1590 		}
1591 
1592 		case IEEE80211_S_AUTH:
1593 		case IEEE80211_S_ASSOC:
1594 		case IEEE80211_S_RUN:
1595 			sc->sc_flags |= IWK_F_SCANNING;
1596 			sc->sc_scan_pending = 0;
1597 
1598 			iwk_set_led(sc, 2, 10, 2);
1599 			/* FALLTHRU */
1600 		case IEEE80211_S_SCAN:
1601 			mutex_exit(&sc->sc_glock);
1602 			/* step to next channel before actual FW scan */
1603 			err = sc->sc_newstate(ic, nstate, arg);
1604 			mutex_enter(&sc->sc_glock);
1605 			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1606 				cmn_err(CE_WARN,
1607 				    "could not initiate scan\n");
1608 				sc->sc_flags &= ~IWK_F_SCANNING;
1609 				ieee80211_cancel_scan(ic);
1610 			}
1611 			mutex_exit(&sc->sc_glock);
1612 			return (err);
1613 		default:
1614 			break;
1615 
1616 		}
1617 		sc->sc_clk = 0;
1618 		break;
1619 
1620 	case IEEE80211_S_AUTH:
1621 		if (ostate == IEEE80211_S_SCAN) {
1622 			sc->sc_flags &= ~IWK_F_SCANNING;
1623 		}
1624 
1625 		/* reset state to handle reassociations correctly */
1626 		sc->sc_config.assoc_id = 0;
1627 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1628 
1629 		/*
1630 		 * Before sending authentication and association request frames,
1631 		 * the hardware must be set up, e.g. the channel must be set
1632 		 * to that of the target AP.
1633 		 */
1634 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1635 			cmn_err(CE_WARN, "could not setup firmware for "
1636 			    "authentication\n");
1637 			mutex_exit(&sc->sc_glock);
1638 			return (err);
1639 		}
1640 		break;
1641 
1642 	case IEEE80211_S_RUN:
1643 		if (ostate == IEEE80211_S_SCAN) {
1644 			sc->sc_flags &= ~IWK_F_SCANNING;
1645 		}
1646 
1647 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1648 			/* let LED blink when monitoring */
1649 			iwk_set_led(sc, 2, 10, 10);
1650 			break;
1651 		}
1652 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1653 
1654 		/* IBSS mode */
1655 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1656 			/*
1657 			 * clean all nodes in ibss node table
1658 			 * in order to be consistent with hardware
1659 			 */
1660 			err = iwk_run_state_config_ibss(ic);
1661 			if (err != IWK_SUCCESS) {
1662 				cmn_err(CE_WARN, "iwk_newstate(): "
1663 				    "failed to update configuration "
1664 				    "in IBSS mode\n");
1665 				mutex_exit(&sc->sc_glock);
1666 				return (err);
1667 			}
1668 		}
1669 
1670 		/* non-IBSS mode */
1671 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1672 			/* update adapter's configuration */
1673 			err = iwk_run_state_config_sta(ic);
1674 			if (err != IWK_SUCCESS) {
1675 				cmn_err(CE_WARN, "iwk_newstate(): "
1676 				    "failed to update configuration "
1677 				    "in non-IBSS mode\n");
1678 				mutex_exit(&sc->sc_glock);
1679 				return (err);
1680 			}
1681 		}
1682 
1683 		/* obtain current temperature of chipset */
1684 		sc->sc_tempera = iwk_curr_tempera(sc);
1685 
1686 		/*
1687 		 * Perform Tx power calibration to determine
1688 		 * the DSP and radio gains.
1689 		 */
1690 		err = iwk_tx_power_calibration(sc);
1691 		if (err) {
1692 			cmn_err(CE_WARN, "iwk_newstate(): "
1693 			    "failed to set tx power table\n");
1694 			mutex_exit(&sc->sc_glock);
1695 			return (err);
1696 		}
1697 
1698 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1699 
1700 			/*
1701 			 * allocate and transmit beacon frames
1702 			 */
1703 			err = iwk_start_tx_beacon(ic);
1704 			if (err != IWK_SUCCESS) {
1705 				cmn_err(CE_WARN, "iwk_newstate(): "
1706 				    "can't transmit beacon frames\n");
1707 				mutex_exit(&sc->sc_glock);
1708 				return (err);
1709 			}
1710 		}
1711 
1712 		/* start automatic rate control */
1713 		mutex_enter(&sc->sc_mt_lock);
1714 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1715 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1716 			/* set rate to some reasonable initial value */
1717 			i = in->in_rates.ir_nrates - 1;
1718 			while (i > 0 && IEEE80211_RATE(i) > 72)
1719 				i--;
1720 			in->in_txrate = i;
1721 		} else {
1722 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1723 		}
1724 		mutex_exit(&sc->sc_mt_lock);
1725 
1726 		/* set LED on after associated */
1727 		iwk_set_led(sc, 2, 0, 1);
1728 		break;
1729 
1730 	case IEEE80211_S_INIT:
1731 		if (ostate == IEEE80211_S_SCAN) {
1732 			sc->sc_flags &= ~IWK_F_SCANNING;
1733 		}
1734 
1735 		/* set LED off after init */
1736 		iwk_set_led(sc, 2, 1, 0);
1737 		break;
1738 	case IEEE80211_S_ASSOC:
1739 		if (ostate == IEEE80211_S_SCAN) {
1740 			sc->sc_flags &= ~IWK_F_SCANNING;
1741 		}
1742 
1743 		break;
1744 	}
1745 
1746 	mutex_exit(&sc->sc_glock);
1747 
1748 	err = sc->sc_newstate(ic, nstate, arg);
1749 
1750 	if (nstate == IEEE80211_S_RUN) {
1751 
1752 		mutex_enter(&sc->sc_glock);
1753 
1754 		/*
1755 		 * Initialize receiver
1756 		 * sensitivity calibration.
1757 		 */
1758 		err = iwk_rx_sens_init(sc);
1759 		if (err) {
1760 			cmn_err(CE_WARN, "iwk_newstate(): "
1761 			    "failed to init RX sensitivity\n");
1762 			mutex_exit(&sc->sc_glock);
1763 			return (err);
1764 		}
1765 
1766 		/* initialize receiver gain balance calibration */
1767 		err = iwk_rxgain_diff_init(sc);
1768 		if (err) {
1769 			cmn_err(CE_WARN, "iwk_newstate(): "
1770 			    "failed to init phy calibration\n");
1771 			mutex_exit(&sc->sc_glock);
1772 			return (err);
1773 		}
1774 
1775 		mutex_exit(&sc->sc_glock);
1776 
1777 	}
1778 
1779 	return (err);
1780 }
1781 
1782 static void
1783 iwk_watchdog(void *arg)
1784 {
1785 	iwk_sc_t *sc = arg;
1786 	struct ieee80211com *ic = &sc->sc_ic;
1787 #ifdef DEBUG
1788 	timeout_id_t timeout_id = ic->ic_watchdog_timer;
1789 #endif
1790 
1791 	ieee80211_stop_watchdog(ic);
1792 
1793 	if ((ic->ic_state != IEEE80211_S_AUTH) &&
1794 	    (ic->ic_state != IEEE80211_S_ASSOC))
1795 		return;
1796 
1797 	if (ic->ic_bss->in_fails > 0) {
1798 		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) reset: "
1799 		    "node (0x%x)\n", timeout_id, &ic->ic_bss));
1800 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1801 	} else {
1802 		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) timeout: "
1803 		    "node (0x%x), retry (%d)\n",
1804 		    timeout_id, &ic->ic_bss, ic->ic_bss->in_fails + 1));
1805 		ieee80211_watchdog(ic);
1806 	}
1807 }
1808 
1809 /*ARGSUSED*/
1810 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1811     const uint8_t mac[IEEE80211_ADDR_LEN])
1812 {
1813 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1814 	iwk_add_sta_t node;
1815 	int err;
1816 	uint8_t index1;
1817 
1818 	switch (k->wk_cipher->ic_cipher) {
1819 	case IEEE80211_CIPHER_WEP:
1820 	case IEEE80211_CIPHER_TKIP:
1821 		return (1); /* software handles it. */
1822 	case IEEE80211_CIPHER_AES_CCM:
1823 		break;
1824 	default:
1825 		return (0);
1826 	}
1827 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1828 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1829 
1830 	mutex_enter(&sc->sc_glock);
1831 
1832 	/* update ap/multicast node */
1833 	(void) memset(&node, 0, sizeof (node));
1834 	if (IEEE80211_IS_MULTICAST(mac)) {
1835 		(void) memset(node.bssid, 0xff, 6);
1836 		node.id = IWK_BROADCAST_ID;
1837 	} else if (ic->ic_opmode == IEEE80211_M_IBSS) {
1838 		mutex_exit(&sc->sc_glock);
1839 		mutex_enter(&sc->sc_ibss.node_tb_lock);
1840 
1841 		/*
1842 		 * search for node in ibss node table
1843 		 */
1844 		for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
1845 		    index1++) {
1846 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
1847 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
1848 			    ibss_node_tb[index1].node.bssid,
1849 			    mac)) {
1850 				break;
1851 			}
1852 		}
1853 		if (index1 >= IWK_BROADCAST_ID) {
1854 			cmn_err(CE_WARN, "iwk_key_set(): "
1855 			    "node not found in hardware node table\n");
1856 			mutex_exit(&sc->sc_ibss.node_tb_lock);
1857 			return (0);
1858 		} else {
1859 			/*
1860 			 * configure key for given node in hardware
1861 			 */
1862 			if (k->wk_flags & IEEE80211_KEY_XMIT) {
1863 				sc->sc_ibss.ibss_node_tb[index1].
1864 				    node.key_flags = 0;
1865 				sc->sc_ibss.ibss_node_tb[index1].
1866 				    node.keyp = k->wk_keyix;
1867 			} else {
1868 				sc->sc_ibss.ibss_node_tb[index1].
1869 				    node.key_flags = (1 << 14);
1870 				sc->sc_ibss.ibss_node_tb[index1].
1871 				    node.keyp = k->wk_keyix + 4;
1872 			}
1873 
1874 			(void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
1875 			    k->wk_key, k->wk_keylen);
1876 			sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
1877 			    (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1878 			sc->sc_ibss.ibss_node_tb[index1].node.sta_mask =
1879 			    STA_MODIFY_KEY_MASK;
1880 			sc->sc_ibss.ibss_node_tb[index1].node.control = 1;
1881 
1882 			mutex_enter(&sc->sc_glock);
1883 			err = iwk_cmd(sc, REPLY_ADD_STA,
1884 			    &sc->sc_ibss.ibss_node_tb[index1].node,
1885 			    sizeof (iwk_add_sta_t), 1);
1886 			if (err != IWK_SUCCESS) {
1887 				cmn_err(CE_WARN, "iwk_key_set(): "
1888 				    "failed to update IBSS node in hardware\n");
1889 				mutex_exit(&sc->sc_glock);
1890 				mutex_exit(&sc->sc_ibss.node_tb_lock);
1891 				return (0);
1892 			}
1893 			mutex_exit(&sc->sc_glock);
1894 		}
1895 		mutex_exit(&sc->sc_ibss.node_tb_lock);
1896 		return (1);
1897 	} else {
1898 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1899 		node.id = IWK_AP_ID;
1900 	}
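	/*
	 * Transmit (pairwise) keys use the net80211 key index directly;
	 * receive/group keys are installed at index + 4 with bit 14 set
	 * in key_flags, which presumably marks a group/multicast key.
	 */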
1901 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1902 		node.key_flags = 0;
1903 		node.keyp = k->wk_keyix;
1904 	} else {
1905 		node.key_flags = (1 << 14);
1906 		node.keyp = k->wk_keyix + 4;
1907 	}
1908 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1909 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1910 	node.sta_mask = STA_MODIFY_KEY_MASK;
1911 	node.control = 1;
1912 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1913 	if (err != IWK_SUCCESS) {
1914 		cmn_err(CE_WARN, "iwk_key_set():"
1915 		    "failed to update ap node\n");
1916 		mutex_exit(&sc->sc_glock);
1917 		return (0);
1918 	}
1919 	mutex_exit(&sc->sc_glock);
1920 	return (1);
1921 }
1922 
1923 /*
1924  * exclusive access to mac begin.
1925  */
1926 static void
1927 iwk_mac_access_enter(iwk_sc_t *sc)
1928 {
1929 	uint32_t tmp;
1930 	int n;
1931 
1932 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1933 	IWK_WRITE(sc, CSR_GP_CNTRL,
1934 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1935 
1936 	/* wait until we succeed */
1937 	for (n = 0; n < 1000; n++) {
1938 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1939 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1940 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1941 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1942 			break;
1943 		DELAY(10);
1944 	}
1945 	if (n == 1000)
1946 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1947 }
1948 
1949 /*
1950  * exclusive access to mac end.
1951  */
1952 static void
1953 iwk_mac_access_exit(iwk_sc_t *sc)
1954 {
1955 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1956 	IWK_WRITE(sc, CSR_GP_CNTRL,
1957 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1958 }
1959 
1960 static uint32_t
1961 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1962 {
1963 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1964 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1965 }
1966 
1967 static void
1968 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1969 {
1970 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1971 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1972 }
1973 
1974 static uint32_t
1975 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1976 {
1977 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1978 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1979 }
1980 
1981 static void
1982 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1983 {
1984 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1985 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1986 }
1987 
1988 static void
1989 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1990     uint32_t *data, int wlen)
1991 {
1992 	for (; wlen > 0; wlen--, data++, addr += 4)
1993 		iwk_reg_write(sc, addr, *data);
1994 }
1995 
1996 
1997 /*
1998  * ucode load/initialization steps:
1999  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
2000  * BSM contains a small memory that *always* stays powered up, so it can
2001  * retain the bootstrap program even when the card is in a power-saving
2002  * power-down state.  The BSM loads the small program into ARC processor's
2003  * instruction memory when triggered by power-up.
2004  * 2)  load Initialize image via bootstrap program.
2005  * The Initialize image sets up regulatory and calibration data for the
2006  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
2007  * The 4965 reply contains calibration data for temperature, voltage and tx gain
2008  * correction.
2009  */
2010 static int
2011 iwk_load_firmware(iwk_sc_t *sc)
2012 {
2013 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
2014 	uint32_t size = sc->sc_hdr->bootsz;
2015 	int n, err = IWK_SUCCESS;
2016 
2017 	/*
2018 	 * Program the BSM with the physical address bits [35:4] and sizes of
2019 	 * the Initialize uCode image.  When the "initialize alive" notification
2020 	 * arrives, the runtime uCode addresses are set up for loading.
2021 	 */
2022 	iwk_mac_access_enter(sc);
2023 
2024 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2025 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
2026 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2027 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
2028 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2029 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
2030 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2031 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
2032 
2033 	/* load bootstrap code into BSM memory */
2034 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
2035 	    size / sizeof (uint32_t));
2036 
2037 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
2038 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
2039 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
2040 
2041 	/*
2042 	 * prepare to load initialize uCode
2043 	 */
2044 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2045 
2046 	/* wait while the adapter is busy loading the firmware */
2047 	for (n = 0; n < 1000; n++) {
2048 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
2049 		    BSM_WR_CTRL_REG_BIT_START))
2050 			break;
2051 		DELAY(10);
2052 	}
2053 	if (n == 1000) {
2054 		cmn_err(CE_WARN, "timeout transferring firmware\n");
2055 		err = ETIMEDOUT;
2056 		return (err);
2057 	}
2058 
2059 	/* for future power-save mode use */
2060 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2061 
2062 	iwk_mac_access_exit(sc);
2063 
2064 	return (err);
2065 }
2066 
2067 /*ARGSUSED*/
2068 static void
2069 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2070 {
2071 	ieee80211com_t *ic = &sc->sc_ic;
2072 	iwk_rx_ring_t *ring = &sc->sc_rxq;
2073 	iwk_rx_phy_res_t *stat;
2074 	ieee80211_node_t *in;
2075 	uint32_t *tail;
2076 	struct ieee80211_frame *wh;
2077 	mblk_t *mp;
2078 	uint16_t len, rssi, mrssi, agc;
2079 	int16_t t;
2080 	uint32_t ants, i;
2081 	struct iwk_rx_non_cfg_phy *phyinfo;
2082 
2083 	/* assume non-11n frames here; 11n support is deferred to phase II */
2084 	stat = (iwk_rx_phy_res_t *)(desc + 1);
2085 	if (stat->cfg_phy_cnt > 20) {
2086 		return;
2087 	}
2088 
2089 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
2090 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
2091 	mrssi = 0;
2092 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
2093 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
2094 	for (i = 0; i < 3; i++) {
2095 		if (ants & (1 << i))
2096 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
2097 	}
2098 	t = mrssi - agc - 44; /* t is the signal level in dBm */
2099 	/*
2100 	 * map the dBm value to a 0-100 signal quality percentage (clamped below)
2101 	 */
2102 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
2103 	    (75 * 75);
2104 	if (rssi > 100)
2105 		rssi = 100;
2106 	if (rssi < 1)
2107 		rssi = 1;
2108 	len = stat->byte_count;
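	/*
	 * a 32-bit frame status word follows the PHY data and the frame
	 * itself in the rx buffer; it is checked below for CRC and
	 * overrun status.
	 */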
2109 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
2110 
2111 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
2112 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2113 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2114 	    len, stat->rate.r.s.rate, stat->channel,
2115 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2116 	    stat->cfg_phy_cnt, LE_32(*tail)));
2117 
2118 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2119 		IWK_DBG((IWK_DEBUG_RX, "rx frame size out of range\n"));
2120 		return;
2121 	}
2122 
2123 	/*
2124 	 * discard Rx frames with bad CRC
2125 	 */
2126 	if ((LE_32(*tail) &
2127 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2128 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2129 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2130 		    LE_32(*tail)));
2131 		sc->sc_rx_err++;
2132 		return;
2133 	}
2134 
2135 	wh = (struct ieee80211_frame *)
2136 	    ((uint8_t *)(stat + 1) + stat->cfg_phy_cnt);
2137 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2138 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2139 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2140 		    sc->sc_assoc_id));
2141 	}
2142 #ifdef DEBUG
2143 	if (iwk_dbg_flags & IWK_DEBUG_RX)
2144 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2145 #endif
2146 	in = ieee80211_find_rxnode(ic, wh);
2147 	mp = allocb(len, BPRI_MED);
2148 	if (mp) {
2149 		(void) memcpy(mp->b_wptr, wh, len);
2150 		mp->b_wptr += len;
2151 
2152 		/* send the frame to the 802.11 layer */
2153 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2154 	} else {
2155 		sc->sc_rx_nobuf++;
2156 		IWK_DBG((IWK_DEBUG_RX,
2157 		    "iwk_rx_intr(): alloc rx buf failed\n"));
2158 	}
2159 	/* release node reference */
2160 	ieee80211_free_node(in);
2161 }
2162 
2163 /*ARGSUSED*/
2164 static void
2165 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2166 {
2167 	ieee80211com_t *ic = &sc->sc_ic;
2168 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2169 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2170 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2171 
2172 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2173 	    " retries=%d frame_count=%x nkill=%d "
2174 	    "rate=%x duration=%d status=%x\n",
2175 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2176 	    stat->bt_kill_count, stat->rate.r.s.rate,
2177 	    LE_32(stat->duration), LE_32(stat->status)));
2178 
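	/*
	 * update AMRR rate control statistics; the bss node is allocated
	 * by the driver with the AMRR state embedded, so the cast above
	 * is assumed to be safe.
	 */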
2179 	amrr->txcnt++;
2180 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2181 	if (stat->ntries > 0) {
2182 		amrr->retrycnt++;
2183 		sc->sc_tx_retries++;
2184 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2185 		    sc->sc_tx_retries));
2186 	}
2187 
2188 	sc->sc_tx_timer = 0;
2189 
2190 	mutex_enter(&sc->sc_tx_lock);
2191 	ring->queued--;
2192 	if (ring->queued < 0)
2193 		ring->queued = 0;
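	/*
	 * if iwk_send() asked for a reschedule because the ring was full,
	 * tell the MAC layer it may resume transmission.  Note that the
	 * '<< 3' makes this threshold larger than the ring size, so the
	 * resume effectively happens on the first completed frame.
	 */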
2194 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2195 		sc->sc_need_reschedule = 0;
2196 		mutex_exit(&sc->sc_tx_lock);
2197 		mac_tx_update(ic->ic_mach);
2198 		mutex_enter(&sc->sc_tx_lock);
2199 	}
2200 	mutex_exit(&sc->sc_tx_lock);
2201 }
2202 
2203 static void
2204 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2205 {
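	/* only replies on the command queue (queue 4) complete iwk_cmd() */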
2206 	if ((desc->hdr.qid & 7) != 4) {
2207 		return;
2208 	}
2209 	mutex_enter(&sc->sc_glock);
2210 	sc->sc_flags |= IWK_F_CMD_DONE;
2211 	cv_signal(&sc->sc_cmd_cv);
2212 	mutex_exit(&sc->sc_glock);
2213 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2214 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2215 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2216 	    desc->hdr.type));
2217 }
2218 
2219 static void
2220 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2221 {
2222 	uint32_t base, i;
2223 	struct iwk_alive_resp *ar =
2224 	    (struct iwk_alive_resp *)(desc + 1);
2225 
2226 	/* the microcontroller is ready */
2227 	IWK_DBG((IWK_DEBUG_FW,
2228 	    "microcode alive notification minor: %x major: %x type:"
2229 	    " %x subtype: %x\n",
2230 	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2231 
2232 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2233 		IWK_DBG((IWK_DEBUG_FW,
2234 		    "microcontroller initialization failed\n"));
2235 	}
2236 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2237 		IWK_DBG((IWK_DEBUG_FW,
2238 		    "initialization alive received.\n"));
2239 		(void) memcpy(&sc->sc_card_alive_init, ar,
2240 		    sizeof (struct iwk_init_alive_resp));
2241 		/* XXX get temperature */
2242 		iwk_mac_access_enter(sc);
2243 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2244 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2245 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2246 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2247 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2248 		    sc->sc_dma_fw_data.cookie.dmac_size);
2249 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2250 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2251 		iwk_mac_access_exit(sc);
2252 	} else {
2253 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2254 		(void) memcpy(&sc->sc_card_alive_run, ar,
2255 		    sizeof (struct iwk_alive_resp));
2256 
2257 		/*
2258 		 * Init SCD related registers to make Tx work. XXX
2259 		 */
2260 		iwk_mac_access_enter(sc);
2261 
2262 		/* read sram address of data base */
2263 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2264 
2265 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2266 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2267 		    i < 128; i += 4)
2268 			iwk_mem_write(sc, base + i, 0);
2269 
2270 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2271 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2272 		    i < 256; i += 4)
2273 			iwk_mem_write(sc, base + i, 0);
2274 
2275 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2276 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2277 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2278 			iwk_mem_write(sc, base + i, 0);
2279 
2280 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2281 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2282 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2283 
2284 		/* initiate the tx queues */
2285 		/* initialize the tx queues */
2286 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2287 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2288 			iwk_mem_write(sc, sc->sc_scd_base +
2289 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2290 			    (SCD_WIN_SIZE & 0x7f));
2291 			iwk_mem_write(sc, sc->sc_scd_base +
2292 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2293 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2294 		}
2295 		/* enable scheduler interrupts for all tx queues */
2296 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2297 		    (1 << IWK_NUM_QUEUES) - 1);
2298 		/* enable tx FIFO channels 0-7 */
2299 		iwk_reg_write(sc, SCD_TXFACT,
2300 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2301 		/*
2302 		 * map tx queues to FIFOs of the same number (0-6 here) and
2303 		 * run them all in FIFO mode (non-scheduler-ack)
2304 		 */
2305 		for (i = 0; i < 7; i++) {
2306 			iwk_reg_write(sc,
2307 			    SCD_QUEUE_STATUS_BITS(i),
2308 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2309 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2310 			    SCD_QUEUE_STTS_REG_MSK);
2311 		}
2312 		iwk_mac_access_exit(sc);
2313 
2314 		sc->sc_flags |= IWK_F_FW_INIT;
2315 		cv_signal(&sc->sc_fw_cv);
2316 	}
2317 
2318 }
2319 
2320 static uint_t
2321 /* LINTED: argument unused in function: unused */
2322 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2323 {
2324 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2325 	ieee80211com_t *ic = &sc->sc_ic;
2326 	iwk_rx_desc_t *desc;
2327 	iwk_rx_data_t *data;
2328 	uint32_t index;
2329 
2330 	mutex_enter(&sc->sc_glock);
2331 	if (sc->sc_rx_softint_pending != 1) {
2332 		mutex_exit(&sc->sc_glock);
2333 		return (DDI_INTR_UNCLAIMED);
2334 	}
2335 	/* disable interrupts */
2336 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2337 	mutex_exit(&sc->sc_glock);
2338 
2339 	/*
2340 	 * the firmware has advanced its rx queue index; fetch it and
2341 	 * process every entry up to that point.
2342 	 */
2343 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2344 
2345 	while (sc->sc_rxq.cur != index) {
2346 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2347 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2348 
2349 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2350 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2351 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2352 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2353 
2354 		/* replies to most commands (not rx/tx related) complete iwk_cmd() */
2355 		if (!(desc->hdr.qid & 0x80) &&
2356 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2357 		    (desc->hdr.type != REPLY_TX) &&
2358 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2359 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2360 		    (desc->hdr.type != SENSITIVITY_CMD))
2361 			iwk_cmd_intr(sc, desc);
2362 
2363 		switch (desc->hdr.type) {
2364 		case REPLY_4965_RX:
2365 			iwk_rx_intr(sc, desc, data);
2366 			break;
2367 
2368 		case REPLY_TX:
2369 			iwk_tx_intr(sc, desc, data);
2370 			break;
2371 
2372 		case REPLY_ALIVE:
2373 			iwk_ucode_alive(sc, desc);
2374 			break;
2375 
2376 		case CARD_STATE_NOTIFICATION:
2377 		{
2378 			uint32_t *status = (uint32_t *)(desc + 1);
2379 
2380 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2381 			    LE_32(*status)));
2382 
2383 			if (LE_32(*status) & 1) {
2384 				/*
2385 				 * the RF kill switch has been toggled OFF.
2386 				 * treat it as a hw error; iwk_thread() will
2387 				 * recover once the switch is toggled back
2388 				 * ON.
2389 				 */
2390 				cmn_err(CE_NOTE,
2391 				    "iwk_rx_softintr(): "
2392 				    "Radio transmitter is off\n");
2393 				sc->sc_ostate = sc->sc_ic.ic_state;
2394 				ieee80211_new_state(&sc->sc_ic,
2395 				    IEEE80211_S_INIT, -1);
2396 				sc->sc_flags |=
2397 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2398 			}
2399 			break;
2400 		}
2401 		case SCAN_START_NOTIFICATION:
2402 		{
2403 			iwk_start_scan_t *scan =
2404 			    (iwk_start_scan_t *)(desc + 1);
2405 
2406 			IWK_DBG((IWK_DEBUG_SCAN,
2407 			    "scanning channel %d status %x\n",
2408 			    scan->chan, LE_32(scan->status)));
2409 
2410 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2411 			break;
2412 		}
2413 		case SCAN_COMPLETE_NOTIFICATION:
2414 		{
2415 			iwk_stop_scan_t *scan =
2416 			    (iwk_stop_scan_t *)(desc + 1);
2417 
2418 			IWK_DBG((IWK_DEBUG_SCAN,
2419 			    "completed channel %d (burst of %d) status %02x\n",
2420 			    scan->chan, scan->nchan, scan->status));
2421 
2422 			sc->sc_scan_pending++;
2423 			break;
2424 		}
2425 		case STATISTICS_NOTIFICATION:
2426 			/* handle statistics notification */
2427 			iwk_statistics_notify(sc, desc);
2428 			break;
2429 		}
2430 
2431 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2432 	}
2433 
2434 	/*
2435 	 * tell the firmware how far the driver has processed the rx queue;
2436 	 * the write pointer handed back is rounded down to a multiple of 8.
2437 	 */
2438 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2439 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2440 
2441 	mutex_enter(&sc->sc_glock);
2442 	/* re-enable interrupts */
2443 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2444 	sc->sc_rx_softint_pending = 0;
2445 	mutex_exit(&sc->sc_glock);
2446 
2447 	return (DDI_INTR_CLAIMED);
2448 }
2449 
2450 static uint_t
2451 /* LINTED: argument unused in function: unused */
2452 iwk_intr(caddr_t arg, caddr_t unused)
2453 {
2454 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2455 	uint32_t r, rfh;
2456 
2457 	mutex_enter(&sc->sc_glock);
2458 
2459 	if (sc->sc_flags & IWK_F_SUSPEND) {
2460 		mutex_exit(&sc->sc_glock);
2461 		return (DDI_INTR_UNCLAIMED);
2462 	}
2463 
2464 	r = IWK_READ(sc, CSR_INT);
2465 	if (r == 0 || r == 0xffffffff) {
2466 		mutex_exit(&sc->sc_glock);
2467 		return (DDI_INTR_UNCLAIMED);
2468 	}
2469 
2470 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2471 
2472 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2473 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2474 	/* disable interrupts */
2475 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2476 	/* ack interrupts */
2477 	IWK_WRITE(sc, CSR_INT, r);
2478 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2479 
2480 	if (sc->sc_soft_hdl == NULL) {
2481 		mutex_exit(&sc->sc_glock);
2482 		return (DDI_INTR_CLAIMED);
2483 	}
2484 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2485 		cmn_err(CE_WARN, "fatal firmware error\n");
2486 		mutex_exit(&sc->sc_glock);
2487 #ifdef DEBUG
2488 		/* dump event and error logs to dmesg */
2489 		iwk_write_error_log(sc);
2490 		iwk_write_event_log(sc);
2491 #endif /* DEBUG */
2492 		iwk_stop(sc);
2493 		sc->sc_ostate = sc->sc_ic.ic_state;
2494 		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2495 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2496 		return (DDI_INTR_CLAIMED);
2497 	}
2498 
2499 	if (r & BIT_INT_RF_KILL) {
2500 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2501 	}
2502 
2503 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2504 	    (rfh & FH_INT_RX_MASK)) {
2505 		sc->sc_rx_softint_pending = 1;
2506 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2507 	}
2508 
2509 	if (r & BIT_INT_ALIVE)	{
2510 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2511 	}
2512 
2513 	/* re-enable interrupts */
2514 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2515 	mutex_exit(&sc->sc_glock);
2516 
2517 	return (DDI_INTR_CLAIMED);
2518 }
2519 
2520 static uint8_t
2521 iwk_rate_to_plcp(int rate)
2522 {
2523 	uint8_t ret;
2524 
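	/*
	 * rates are in 0.5 Mbps units (2 = 1 Mbps ... 108 = 54 Mbps); the
	 * values returned are the PLCP rate codes the firmware expects in
	 * the rate_n_flags field.
	 */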
2525 	switch (rate) {
2526 	/* CCK rates */
2527 	case 2:
2528 		ret = 0xa;
2529 		break;
2530 	case 4:
2531 		ret = 0x14;
2532 		break;
2533 	case 11:
2534 		ret = 0x37;
2535 		break;
2536 	case 22:
2537 		ret = 0x6e;
2538 		break;
2539 	/* OFDM rates */
2540 	case 12:
2541 		ret = 0xd;
2542 		break;
2543 	case 18:
2544 		ret = 0xf;
2545 		break;
2546 	case 24:
2547 		ret = 0x5;
2548 		break;
2549 	case 36:
2550 		ret = 0x7;
2551 		break;
2552 	case 48:
2553 		ret = 0x9;
2554 		break;
2555 	case 72:
2556 		ret = 0xb;
2557 		break;
2558 	case 96:
2559 		ret = 0x1;
2560 		break;
2561 	case 108:
2562 		ret = 0x3;
2563 		break;
2564 	default:
2565 		ret = 0;
2566 		break;
2567 	}
2568 	return (ret);
2569 }
2570 
2571 static mblk_t *
2572 iwk_m_tx(void *arg, mblk_t *mp)
2573 {
2574 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2575 	ieee80211com_t	*ic = &sc->sc_ic;
2576 	mblk_t			*next;
2577 
2578 	if (sc->sc_flags & IWK_F_SUSPEND) {
2579 		freemsgchain(mp);
2580 		return (NULL);
2581 	}
2582 
2583 	if (ic->ic_state != IEEE80211_S_RUN) {
2584 		freemsgchain(mp);
2585 		return (NULL);
2586 	}
2587 
2588 	while (mp != NULL) {
2589 		next = mp->b_next;
2590 		mp->b_next = NULL;
2591 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2592 			mp->b_next = next;
2593 			break;
2594 		}
2595 		mp = next;
2596 	}
2597 	return (mp);
2598 }
2599 
2600 /* ARGSUSED */
2601 static int
2602 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2603 {
2604 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2605 	iwk_tx_ring_t *ring;
2606 	iwk_tx_desc_t *desc;
2607 	iwk_tx_data_t *data;
2608 	iwk_cmd_t *cmd;
2609 	iwk_tx_cmd_t *tx;
2610 	ieee80211_node_t *in;
2611 	struct ieee80211_frame *wh;
2612 	struct ieee80211_key *k = NULL;
2613 	mblk_t *m, *m0;
2614 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2615 	uint16_t masks = 0;
2616 	uint8_t index, index1, index2;
2617 
2618 	ring = &sc->sc_txq[0];
2619 	data = &ring->data[ring->cur];
2620 	desc = data->desc;
2621 	cmd = data->cmd;
2622 	bzero(desc, sizeof (*desc));
2623 	bzero(cmd, sizeof (*cmd));
2624 
2625 	mutex_enter(&sc->sc_tx_lock);
2626 	if (sc->sc_flags & IWK_F_SUSPEND) {
2627 		mutex_exit(&sc->sc_tx_lock);
2628 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2629 		    IEEE80211_FC0_TYPE_DATA) {
2630 			freemsg(mp);
2631 		}
2632 		err = IWK_FAIL;
2633 		goto exit;
2634 	}
2635 
2636 	if (ring->queued > ring->count - 64) {
2637 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2638 		sc->sc_need_reschedule = 1;
2639 		mutex_exit(&sc->sc_tx_lock);
2640 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2641 		    IEEE80211_FC0_TYPE_DATA) {
2642 			freemsg(mp);
2643 		}
2644 		sc->sc_tx_nobuf++;
2645 		err = IWK_FAIL;
2646 		goto exit;
2647 	}
2648 	mutex_exit(&sc->sc_tx_lock);
2649 
2650 	hdrlen = sizeof (struct ieee80211_frame);
2651 
2652 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2653 	if (m == NULL) { /* cannot allocate a buffer, drop this packet */
2654 		cmn_err(CE_WARN,
2655 		    "iwk_send(): failed to allocate msgbuf\n");
2656 		freemsg(mp);
2657 		err = IWK_SUCCESS;
2658 		goto exit;
2659 	}
2660 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2661 		mblen = MBLKL(m0);
2662 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2663 		off += mblen;
2664 	}
2665 	m->b_wptr += off;
2666 	freemsg(mp);
2667 
2668 	wh = (struct ieee80211_frame *)m->b_rptr;
2669 
2670 	if (ic->ic_opmode == IEEE80211_M_IBSS &&
2671 	    (!(IEEE80211_IS_MULTICAST(wh->i_addr1)))) {
2672 		mutex_enter(&sc->sc_glock);
2673 		mutex_enter(&sc->sc_ibss.node_tb_lock);
2674 
2675 		/*
2676 		 * search for node in ibss node table
2677 		 */
2678 		for (index1 = IWK_STA_ID;
2679 		    index1 < IWK_STATION_COUNT; index1++) {
2680 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
2681 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
2682 			    ibss_node_tb[index1].node.bssid,
2683 			    wh->i_addr1)) {
2684 				break;
2685 			}
2686 		}
2687 
2688 		/*
2689 		 * the node is not in the ibss node table; clean up and add it
2690 		 */
2691 		if (index1 >= IWK_BROADCAST_ID) {
2692 			err = iwk_clean_add_node_ibss(ic,
2693 			    wh->i_addr1, &index2);
2694 			if (err != IWK_SUCCESS) {
2695 				cmn_err(CE_WARN, "iwk_send(): "
2696 				    "failed to clean all nodes "
2697 				    "and add one node\n");
2698 				mutex_exit(&sc->sc_ibss.node_tb_lock);
2699 				mutex_exit(&sc->sc_glock);
2700 				freemsg(m);
2701 				sc->sc_tx_err++;
2702 				err = IWK_SUCCESS;
2703 				goto exit;
2704 			}
2705 			index = index2;
2706 		} else {
2707 			index = index1;
2708 		}
2709 		mutex_exit(&sc->sc_ibss.node_tb_lock);
2710 		mutex_exit(&sc->sc_glock);
2711 	}
2712 
2713 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2714 	if (in == NULL) {
2715 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2716 		freemsg(m);
2717 		sc->sc_tx_err++;
2718 		err = IWK_SUCCESS;
2719 		goto exit;
2720 	}
2721 	(void) ieee80211_encap(ic, m, in);
2722 
2723 	cmd->hdr.type = REPLY_TX;
2724 	cmd->hdr.flags = 0;
2725 	cmd->hdr.qid = ring->qid;
2726 	cmd->hdr.idx = ring->cur;
2727 
2728 	tx = (iwk_tx_cmd_t *)cmd->data;
2729 	tx->tx_flags = 0;
2730 
2731 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2732 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2733 	} else {
2734 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2735 	}
2736 
2737 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2738 		k = ieee80211_crypto_encap(ic, m);
2739 		if (k == NULL) {
2740 			freemsg(m);
2741 			sc->sc_tx_err++;
2742 			err = IWK_SUCCESS;
2743 			goto exit;
2744 		}
2745 
2746 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2747 			tx->sec_ctl = 2; /* for CCMP */
2748 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2749 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2750 		}
2751 
2752 		/* packet header may have moved, reset our local pointer */
2753 		wh = (struct ieee80211_frame *)m->b_rptr;
2754 	}
2755 
2756 	len = msgdsize(m);
2757 
2758 #ifdef DEBUG
2759 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2760 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2761 #endif
2762 
2763 	/* pick a tx rate */
2764 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2765 	    IEEE80211_FC0_TYPE_MGT) {
2766 		/* mgmt frames are sent at 1M */
2767 		rate = in->in_rates.ir_rates[0];
2768 	} else {
2769 		/*
2770 		 * the rate is picked here in software for now; hardware
2771 		 * rate scaling may be added later, e.g. for a management
2772 		 * frame:
2773 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2774 		 * for data frame:
2775 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2776 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2777 		 * tx->initial_rate_index = 1;
2778 		 *
2779 		 * for now the tx rate is set via the tx cmd flags to the
2780 		 * maximum: 54 Mbps for 11g and 11 Mbps for 11b.
2781 		 */
2782 
2783 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2784 			rate = ic->ic_fixed_rate;
2785 		} else {
2786 			rate = in->in_rates.ir_rates[in->in_txrate];
2787 		}
2788 	}
2789 	rate &= IEEE80211_RATE_VAL;
2790 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2791 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2792 
2793 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2794 
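	/*
	 * len0 is the size of the first TB (scheduler word + tx command +
	 * 802.11 header) rounded up to 4 bytes; if padding was added, set
	 * TX_CMD_FLG_MH_PAD_MSK, presumably so the firmware accounts for
	 * the padded MAC header.
	 */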
2795 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2796 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2797 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2798 
2799 	/* retrieve destination node's id */
2800 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2801 		tx->sta_id = IWK_BROADCAST_ID;
2802 	} else {
2803 		if (ic->ic_opmode == IEEE80211_M_IBSS)
2804 			tx->sta_id = index;
2805 		else
2806 			tx->sta_id = IWK_AP_ID;
2807 	}
2808 
2809 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2810 	    IEEE80211_FC0_TYPE_MGT) {
2811 		/* tell h/w to set timestamp in probe responses */
2812 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2813 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2814 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2815 
2816 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2817 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2818 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2819 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2820 			tx->timeout.pm_frame_timeout = 3;
2821 		else
2822 			tx->timeout.pm_frame_timeout = 2;
2823 	} else
2824 		tx->timeout.pm_frame_timeout = 0;
2825 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2826 		masks |= RATE_MCS_CCK_MSK;
2827 
2828 	masks |= RATE_MCS_ANT_B_MSK;
2829 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2830 
2831 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2832 	    tx->tx_flags));
2833 
2834 	tx->rts_retry_limit = 60;
2835 	tx->data_retry_limit = 15;
2836 
2837 	tx->stop_time.life_time  = LE_32(0xffffffff);
2838 
2839 	tx->len = LE_16(len);
2840 
2841 	tx->dram_lsb_ptr =
2842 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2843 	tx->dram_msb_ptr = 0;
2844 	tx->driver_txop = 0;
2845 	tx->next_frame_len = 0;
2846 
2847 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2848 	m->b_rptr += hdrlen;
2849 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2850 
2851 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2852 	    ring->qid, ring->cur, len));
2853 
2854 	/*
2855 	 * first segment includes the tx cmd plus the 802.11 header,
2856 	 * the second includes the remainder of the 802.11 frame.
2857 	 */
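	/*
	 * bit layout (as used here): val0 carries the TB count at bit 24;
	 * pa[0].val1 packs the first TB length into bits [15:4] and the
	 * low 16 bits of the payload DMA address into bits [31:16];
	 * pa[0].val2 holds the remaining address bits and the payload
	 * length in bits [31:20].
	 */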
2858 	desc->val0 = LE_32(2 << 24);
2859 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2860 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2861 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2862 	desc->pa[0].val2 =
2863 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2864 	    ((len - hdrlen) << 20);
2865 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2866 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2867 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2868 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2869 
2870 	mutex_enter(&sc->sc_tx_lock);
2871 	ring->queued++;
2872 	mutex_exit(&sc->sc_tx_lock);
2873 
2874 	/* update the scheduler byte count table for this frame and kick the ring */
2875 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2876 	    tfd_offset[ring->cur].val = 8 + len;
2877 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2878 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2879 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2880 	}
2881 
2882 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2883 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2884 
2885 	ring->cur = (ring->cur + 1) % ring->count;
2886 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2887 	freemsg(m);
2888 	/* release node reference */
2889 	ieee80211_free_node(in);
2890 
2891 	ic->ic_stats.is_tx_bytes += len;
2892 	ic->ic_stats.is_tx_frags++;
2893 
2894 	if (sc->sc_tx_timer == 0)
2895 		sc->sc_tx_timer = 10;
2896 exit:
2897 	return (err);
2898 }
2899 
2900 static void
2901 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2902 {
2903 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2904 	ieee80211com_t	*ic = &sc->sc_ic;
2905 
2906 	enum ieee80211_opmode		oldmod;
2907 	iwk_tx_power_table_cmd_t	txpower;
2908 	iwk_add_sta_t			node;
2909 	iwk_link_quality_cmd_t		link_quality;
2910 	uint16_t			masks = 0;
2911 	int				i, err, err1;
2912 
2913 	oldmod = ic->ic_opmode;
2914 
2915 	err = ieee80211_ioctl(ic, wq, mp);
2916 
2917 	/*
2918 	 * return to STA mode
2919 	 */
2920 	if ((0 == err || ENETRESET == err) && (oldmod != ic->ic_opmode) &&
2921 	    (ic->ic_opmode == IEEE80211_M_STA)) {
2922 		/* configure rxon */
2923 		(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
2924 		IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
2925 		IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
2926 		sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
2927 		sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
2928 		    RXON_FLG_AUTO_DETECT_MSK |
2929 		    RXON_FLG_BAND_24G_MSK);
2930 		sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
2931 		switch (ic->ic_opmode) {
2932 		case IEEE80211_M_STA:
2933 			sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
2934 			sc->sc_config.filter_flags |=
2935 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2936 			    RXON_FILTER_DIS_DECRYPT_MSK |
2937 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2938 			break;
2939 		case IEEE80211_M_IBSS:
2940 		case IEEE80211_M_AHDEMO:
2941 			sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
2942 			sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2943 			sc->sc_config.filter_flags =
2944 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2945 			    RXON_FILTER_DIS_DECRYPT_MSK |
2946 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2947 			break;
2948 		case IEEE80211_M_HOSTAP:
2949 			sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
2950 			break;
2951 		case IEEE80211_M_MONITOR:
2952 			sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
2953 			sc->sc_config.filter_flags |=
2954 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2955 			    RXON_FILTER_CTL2HOST_MSK |
2956 			    RXON_FILTER_PROMISC_MSK);
2957 			break;
2958 		}
2959 		sc->sc_config.cck_basic_rates  = 0x0f;
2960 		sc->sc_config.ofdm_basic_rates = 0xff;
2961 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
2962 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
2963 		/* set antenna */
2964 		mutex_enter(&sc->sc_glock);
2965 		sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
2966 		    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
2967 		    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
2968 		    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
2969 		err1 = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
2970 		    sizeof (iwk_rxon_cmd_t), 1);
2971 		if (err1 != IWK_SUCCESS) {
2972 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
2973 			    "failed to set configuration;"
2974 			    " please run (ifconfig unplumb and"
2975 			    " ifconfig plumb)\n");
2976 		}
2977 		/*
2978 		 * set Tx power for 2.4GHz channels
2979 		 * (needs further investigation; tx power is fixed for now)
2980 		 */
2981 		(void) memset(&txpower, 0, sizeof (txpower));
2982 		txpower.band = 1; /* for 2.4G */
2983 		txpower.channel = sc->sc_config.chan;
2984 		txpower.channel_normal_width = 0;
2985 		for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
2986 			txpower.tx_power.ht_ofdm_power[i].
2987 			    s.ramon_tx_gain = 0x3f3f;
2988 			txpower.tx_power.ht_ofdm_power[i].
2989 			    s.dsp_predis_atten = 110 | (110 << 8);
2990 		}
2991 		txpower.tx_power.legacy_cck_power.s.ramon_tx_gain = 0x3f3f;
2992 		txpower.tx_power.legacy_cck_power.s.dsp_predis_atten
2993 		    = 110 | (110 << 8);
2994 		err1 = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
2995 		    sizeof (txpower), 1);
2996 		if (err1 != IWK_SUCCESS) {
2997 			cmn_err(CE_WARN, "iwk_m_ioctl(): failed to set txpower"
2998 			    " please run (ifconfig unplumb "
2999 			    "and ifconfig plumb)\n");
3000 		}
3001 		/* add broadcast node so that we can send broadcast frame */
3002 		(void) memset(&node, 0, sizeof (node));
3003 		(void) memset(node.bssid, 0xff, 6);
3004 		node.id = IWK_BROADCAST_ID;
3005 		err1 = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3006 		if (err1 != IWK_SUCCESS) {
3007 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
3008 			    "failed to add broadcast node\n");
3009 		}
3010 
3011 		/* TX_LINK_QUALITY cmd */
3012 		(void) memset(&link_quality, 0, sizeof (link_quality));
3013 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3014 			masks |= RATE_MCS_CCK_MSK;
3015 			masks |= RATE_MCS_ANT_B_MSK;
3016 			masks &= ~RATE_MCS_ANT_A_MSK;
3017 			link_quality.rate_n_flags[i] =
3018 			    iwk_rate_to_plcp(2) | masks;
3019 		}
3020 		link_quality.general_params.single_stream_ant_msk = 2;
3021 		link_quality.general_params.dual_stream_ant_msk = 3;
3022 		link_quality.agg_params.agg_dis_start_th = 3;
3023 		link_quality.agg_params.agg_time_limit = LE_16(4000);
3024 		link_quality.sta_id = IWK_BROADCAST_ID;
3025 		err1 = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3026 		    sizeof (link_quality), 1);
3027 		if (err1 != IWK_SUCCESS) {
3028 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
3029 			    "failed to config link quality table\n");
3030 		}
3031 		mutex_exit(&sc->sc_glock);
3032 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3033 	}
3034 
3035 	if (err == ENETRESET) {
3036 		/*
3037 		 * This path is specific to hidden AP connections.
3038 		 * The driver should issue only one scan for each
3039 		 * 'connect' command, so when connecting to a hidden
3040 		 * AP the scan is sent over the air only once the
3041 		 * desired ESSID of the AP we want to connect to
3042 		 * is known.
3043 		 */
3044 		if (ic->ic_des_esslen) {
3045 			if (sc->sc_flags & IWK_F_RUNNING) {
3046 				iwk_m_stop(sc);
3047 				(void) iwk_m_start(sc);
3048 				(void) ieee80211_new_state(ic,
3049 				    IEEE80211_S_SCAN, -1);
3050 			}
3051 		}
3052 	}
3053 }
3054 
3055 /*
3056  * callback functions for set/get properties
3057  */
3058 /* ARGSUSED */
3059 static int
3060 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3061     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
3062 {
3063 	int		err = 0;
3064 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3065 
3066 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3067 	    pr_flags, wldp_length, wldp_buf, perm);
3068 
3069 	return (err);
3070 }
3071 static int
3072 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3073     uint_t wldp_length, const void *wldp_buf)
3074 {
3075 	int		err;
3076 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3077 	ieee80211com_t	*ic = &sc->sc_ic;
3078 
3079 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3080 	    wldp_buf);
3081 
3082 	if (err == ENETRESET) {
3083 		if (ic->ic_des_esslen) {
3084 			if (sc->sc_flags & IWK_F_RUNNING) {
3085 				iwk_m_stop(sc);
3086 				(void) iwk_m_start(sc);
3087 				(void) ieee80211_new_state(ic,
3088 				    IEEE80211_S_SCAN, -1);
3089 			}
3090 		}
3091 		err = 0;
3092 	}
3093 
3094 	return (err);
3095 }
3096 
3097 /*ARGSUSED*/
3098 static int
3099 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
3100 {
3101 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
3102 	ieee80211com_t	*ic = &sc->sc_ic;
3103 	ieee80211_node_t *in;
3104 
3105 	mutex_enter(&sc->sc_glock);
3106 	switch (stat) {
3107 	case MAC_STAT_IFSPEED:
3108 		in = ic->ic_bss;
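		/* rates are in 0.5 Mbps units; convert to bits per second */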
3109 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
3110 		    IEEE80211_RATE(in->in_txrate) :
3111 		    ic->ic_fixed_rate) / 2 * 1000000;
3112 		break;
3113 	case MAC_STAT_NOXMTBUF:
3114 		*val = sc->sc_tx_nobuf;
3115 		break;
3116 	case MAC_STAT_NORCVBUF:
3117 		*val = sc->sc_rx_nobuf;
3118 		break;
3119 	case MAC_STAT_IERRORS:
3120 		*val = sc->sc_rx_err;
3121 		break;
3122 	case MAC_STAT_RBYTES:
3123 		*val = ic->ic_stats.is_rx_bytes;
3124 		break;
3125 	case MAC_STAT_IPACKETS:
3126 		*val = ic->ic_stats.is_rx_frags;
3127 		break;
3128 	case MAC_STAT_OBYTES:
3129 		*val = ic->ic_stats.is_tx_bytes;
3130 		break;
3131 	case MAC_STAT_OPACKETS:
3132 		*val = ic->ic_stats.is_tx_frags;
3133 		break;
3134 	case MAC_STAT_OERRORS:
3135 	case WIFI_STAT_TX_FAILED:
3136 		*val = sc->sc_tx_err;
3137 		break;
3138 	case WIFI_STAT_TX_RETRANS:
3139 		*val = sc->sc_tx_retries;
3140 		break;
3141 	case WIFI_STAT_FCS_ERRORS:
3142 	case WIFI_STAT_WEP_ERRORS:
3143 	case WIFI_STAT_TX_FRAGS:
3144 	case WIFI_STAT_MCAST_TX:
3145 	case WIFI_STAT_RTS_SUCCESS:
3146 	case WIFI_STAT_RTS_FAILURE:
3147 	case WIFI_STAT_ACK_FAILURE:
3148 	case WIFI_STAT_RX_FRAGS:
3149 	case WIFI_STAT_MCAST_RX:
3150 	case WIFI_STAT_RX_DUPS:
3151 		mutex_exit(&sc->sc_glock);
3152 		return (ieee80211_stat(ic, stat, val));
3153 	default:
3154 		mutex_exit(&sc->sc_glock);
3155 		return (ENOTSUP);
3156 	}
3157 	mutex_exit(&sc->sc_glock);
3158 
3159 	return (IWK_SUCCESS);
3160 
3161 }
3162 
3163 static int
3164 iwk_m_start(void *arg)
3165 {
3166 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3167 	ieee80211com_t	*ic = &sc->sc_ic;
3168 	int err;
3169 
3170 	err = iwk_init(sc);
3171 
3172 	if (err != IWK_SUCCESS) {
3173 		/*
3174 		 * Hardware initialization failed (e.g. the RF switch is
3175 		 * OFF).  Return success so that 'plumb' succeeds;
3176 		 * iwk_thread() will retry the init in the background.
3177 		 */
3178 		cmn_err(CE_WARN, "iwk_m_start(): failed to initialize "
3179 		    "hardware\n");
3180 		mutex_enter(&sc->sc_glock);
3181 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3182 		mutex_exit(&sc->sc_glock);
3183 		return (IWK_SUCCESS);
3184 	}
3185 
3186 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3187 
3188 	mutex_enter(&sc->sc_glock);
3189 	sc->sc_flags |= IWK_F_RUNNING;
3190 	mutex_exit(&sc->sc_glock);
3191 
3192 	return (IWK_SUCCESS);
3193 }
3194 
3195 static void
3196 iwk_m_stop(void *arg)
3197 {
3198 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3199 	ieee80211com_t	*ic = &sc->sc_ic;
3200 
3201 	iwk_stop(sc);
3202 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3203 	ieee80211_stop_watchdog(ic);
3204 	mutex_enter(&sc->sc_mt_lock);
3205 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3206 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
3207 	mutex_exit(&sc->sc_mt_lock);
3208 	mutex_enter(&sc->sc_glock);
3209 	sc->sc_flags &= ~IWK_F_RUNNING;
3210 	mutex_exit(&sc->sc_glock);
3211 }
3212 
3213 /*ARGSUSED*/
3214 static int
3215 iwk_m_unicst(void *arg, const uint8_t *macaddr)
3216 {
3217 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3218 	ieee80211com_t	*ic = &sc->sc_ic;
3219 	int err;
3220 
3221 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3222 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3223 		mutex_enter(&sc->sc_glock);
3224 		err = iwk_config(sc);
3225 		mutex_exit(&sc->sc_glock);
3226 		if (err != IWK_SUCCESS) {
3227 			cmn_err(CE_WARN,
3228 			    "iwk_m_unicst(): "
3229 			    "failed to configure device\n");
3230 			goto fail;
3231 		}
3232 	}
3233 	return (IWK_SUCCESS);
3234 fail:
3235 	return (err);
3236 }
3237 
3238 /*ARGSUSED*/
3239 static int
3240 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3241 {
3242 	return (IWK_SUCCESS);
3243 }
3244 
3245 /*ARGSUSED*/
3246 static int
3247 iwk_m_promisc(void *arg, boolean_t on)
3248 {
3249 	return (IWK_SUCCESS);
3250 }
3251 
3252 static void
3253 iwk_thread(iwk_sc_t *sc)
3254 {
3255 	ieee80211com_t	*ic = &sc->sc_ic;
3256 	clock_t clk;
3257 	int times = 0, err, n = 0, timeout = 0;
3258 	uint32_t tmp;
3259 
3260 	mutex_enter(&sc->sc_mt_lock);
3261 	while (sc->sc_mf_thread_switch) {
3262 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
3263 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3264 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
3265 		} else {
3266 			sc->sc_flags |= IWK_F_RADIO_OFF;
3267 		}
3268 		/*
3269 		 * If in SUSPEND or the RF is OFF, do nothing
3270 		 */
3271 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
3272 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
3273 			mutex_exit(&sc->sc_mt_lock);
3274 			delay(drv_usectohz(100000));
3275 			mutex_enter(&sc->sc_mt_lock);
3276 			continue;
3277 		}
3278 
3279 		/*
3280 		 * recover from a fatal hardware error
3281 		 */
3282 		if (ic->ic_mach &&
3283 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
3284 
3285 			IWK_DBG((IWK_DEBUG_FW,
3286 			    "iwk_thread(): "
3287 			    "try to recover fatal hw error: %d\n", times++));
3288 
3289 			iwk_stop(sc);
3290 
3291 			mutex_exit(&sc->sc_mt_lock);
3292 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3293 			delay(drv_usectohz(2000000 + n*500000));
3294 			mutex_enter(&sc->sc_mt_lock);
3295 
3296 			err = iwk_init(sc);
3297 			if (err != IWK_SUCCESS) {
3298 				n++;
3299 				if (n < 20)
3300 					continue;
3301 			}
3302 			n = 0;
3303 			if (!err)
3304 				sc->sc_flags |= IWK_F_RUNNING;
3305 			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3306 			mutex_exit(&sc->sc_mt_lock);
3307 			delay(drv_usectohz(2000000));
3308 			if (sc->sc_ostate != IEEE80211_S_INIT)
3309 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
3310 			mutex_enter(&sc->sc_mt_lock);
3311 		}
3312 
3313 		if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) {
3314 			IWK_DBG((IWK_DEBUG_RESUME,
3315 			    "iwk_thread(): "
3316 			    "lazy resume\n"));
3317 			sc->sc_flags &= ~IWK_F_LAZY_RESUME;
3318 			mutex_exit(&sc->sc_mt_lock);
3319 			/*
3320 			 * NB: under WPA mode, this call hangs (door problem?)
3321 			 * when called in iwk_attach() and iwk_detach() while
3322 			 * the system is in the middle of CPR. To be safe, let
3323 			 * the thread do this.
3324 			 */
3325 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3326 			mutex_enter(&sc->sc_mt_lock);
3327 		}
3328 
3329 		if (ic->ic_mach &&
3330 		    (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3331 			IWK_DBG((IWK_DEBUG_SCAN,
3332 			    "iwk_thread(): "
3333 			    "wait for probe response\n"));
3334 			sc->sc_scan_pending--;
3335 			mutex_exit(&sc->sc_mt_lock);
3336 			delay(drv_usectohz(200000));
3337 			if (sc->sc_flags & IWK_F_SCANNING)
3338 				ieee80211_next_scan(ic);
3339 			mutex_enter(&sc->sc_mt_lock);
3340 		}
3341 
3342 		/*
3343 		 * rate ctl
3344 		 */
3345 		if (ic->ic_mach &&
3346 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3347 			clk = ddi_get_lbolt();
3348 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
3349 				iwk_amrr_timeout(sc);
3350 			}
3351 		}
3352 
3353 		mutex_exit(&sc->sc_mt_lock);
3354 		delay(drv_usectohz(100000));
3355 		mutex_enter(&sc->sc_mt_lock);
3356 
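		/*
		 * tx watchdog: sc_tx_timer is armed when a frame is queued
		 * and cleared on tx completion.  Decrement it roughly once
		 * per second (every 10 passes of this 100ms loop) and
		 * schedule hardware recovery if it expires.
		 */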
3357 		if (sc->sc_tx_timer) {
3358 			timeout++;
3359 			if (timeout == 10) {
3360 				sc->sc_tx_timer--;
3361 				if (sc->sc_tx_timer == 0) {
3362 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3363 					sc->sc_ostate = IEEE80211_S_RUN;
3364 					IWK_DBG((IWK_DEBUG_FW,
3365 					    "iwk_thread(): try to recover from"
3366 					    " send failure\n"));
3367 				}
3368 				timeout = 0;
3369 			}
3370 		}
3371 
3372 	}
3373 	sc->sc_mf_thread = NULL;
3374 	cv_signal(&sc->sc_mt_cv);
3375 	mutex_exit(&sc->sc_mt_lock);
3376 }
3377 
3378 
3379 /*
3380  * Send a command to the firmware.
3381  */
3382 static int
3383 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3384 {
3385 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3386 	iwk_tx_desc_t *desc;
3387 	iwk_cmd_t *cmd;
3388 	clock_t clk;
3389 
3390 	ASSERT(size <= sizeof (cmd->data));
3391 	ASSERT(mutex_owned(&sc->sc_glock));
3392 
3393 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3394 	desc = ring->data[ring->cur].desc;
3395 	cmd = ring->data[ring->cur].cmd;
3396 
3397 	cmd->hdr.type = (uint8_t)code;
3398 	cmd->hdr.flags = 0;
3399 	cmd->hdr.qid = ring->qid;
3400 	cmd->hdr.idx = ring->cur;
3401 	(void) memcpy(cmd->data, buf, size);
3402 	(void) memset(desc, 0, sizeof (*desc));
3403 
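	/*
	 * a single TB holds the command header plus payload; the TB count
	 * is encoded starting at bit 24 of val0 (cf. the two-TB setup in
	 * iwk_send()), and the length goes into bits [15:4] of val1.
	 */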
3404 	desc->val0 = LE_32(1 << 24);
3405 	desc->pa[0].tb1_addr =
3406 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3407 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3408 
3409 	/* kick cmd ring XXX */
3410 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3411 	    tfd_offset[ring->cur].val = 8;
3412 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3413 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3414 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3415 	}
3416 	ring->cur = (ring->cur + 1) % ring->count;
3417 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3418 
3419 	if (async)
3420 		return (IWK_SUCCESS);
3421 	else {
3422 		sc->sc_flags &= ~IWK_F_CMD_DONE;
3423 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3424 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3425 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3426 			    0)
3427 				break;
3428 		}
3429 		if (sc->sc_flags & IWK_F_CMD_DONE)
3430 			return (IWK_SUCCESS);
3431 		else
3432 			return (IWK_FAIL);
3433 	}
3434 }
3435 
3436 static void
3437 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3438 {
3439 	iwk_led_cmd_t led;
3440 
3441 	led.interval = LE_32(100000);	/* unit: 100ms */
3442 	led.id = id;
3443 	led.off = off;
3444 	led.on = on;
3445 
3446 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3447 }
3448 
3449 static int
3450 iwk_hw_set_before_auth(iwk_sc_t *sc)
3451 {
3452 	ieee80211com_t *ic = &sc->sc_ic;
3453 	ieee80211_node_t *in = ic->ic_bss;
3454 	iwk_add_sta_t node;
3455 	iwk_link_quality_cmd_t link_quality;
3456 	struct ieee80211_rateset rs;
3457 	uint16_t masks = 0, rate;
3458 	int i, err;
3459 
3460 	if (in->in_chan == IEEE80211_CHAN_ANYC) {
3461 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3462 		    "channel (%d) isn't in proper range\n",
3463 		    ieee80211_chan2ieee(ic, in->in_chan));
3464 		return (IWK_FAIL);
3465 	}
3466 
3467 	/* update the adapter's configuration according to the target AP */
3468 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3469 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
3470 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
3471 		sc->sc_config.cck_basic_rates  = 0x03;
3472 		sc->sc_config.ofdm_basic_rates = 0;
3473 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3474 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3475 		sc->sc_config.cck_basic_rates  = 0;
3476 		sc->sc_config.ofdm_basic_rates = 0x15;
3477 	} else { /* assume 802.11b/g */
3478 		sc->sc_config.cck_basic_rates  = 0x0f;
3479 		sc->sc_config.ofdm_basic_rates = 0xff;
3480 	}
3481 
3482 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3483 	    RXON_FLG_SHORT_SLOT_MSK);
3484 
3485 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3486 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3487 	else
3488 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3489 
3490 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3491 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3492 	else
3493 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3494 
3495 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3496 	    "filter_flags %x  cck %x ofdm %x"
3497 	    " bssid:%02x:%02x:%02x:%02x:%02x:%02x\n",
3498 	    sc->sc_config.chan, sc->sc_config.flags,
3499 	    sc->sc_config.filter_flags,
3500 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3501 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3502 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3503 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3504 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3505 	    sizeof (iwk_rxon_cmd_t), 1);
3506 	if (err != IWK_SUCCESS) {
3507 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3508 		    " failed to config chan%d\n",
3509 		    sc->sc_config.chan);
3510 		return (err);
3511 	}
3512 
3513 	/* obtain current temperature of chipset */
3514 	sc->sc_tempera = iwk_curr_tempera(sc);
3515 
3516 	/* perform Tx power calibration to determine the DSP and radio gains */
3517 	err = iwk_tx_power_calibration(sc);
3518 	if (err) {
3519 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3520 		    "failed to set tx power table\n");
3521 		return (err);
3522 	}
3523 
3524 	/* add default AP node */
3525 	(void) memset(&node, 0, sizeof (node));
3526 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3527 	node.id = IWK_AP_ID;
3528 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3529 	if (err != IWK_SUCCESS) {
3530 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3531 		    "failed to add BSS node\n");
3532 		return (err);
3533 	}
3534 
3535 	/* TX_LINK_QUALITY cmd: build the rate scaling/retry table for the AP */
3536 	(void) memset(&link_quality, 0, sizeof (link_quality));
3537 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3538 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3539 		if (i < rs.ir_nrates)
3540 			rate = rs.ir_rates[rs.ir_nrates - i];
3541 		else
3542 			rate = 2;
3543 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3544 			masks |= RATE_MCS_CCK_MSK;
3545 		masks |= RATE_MCS_ANT_B_MSK;
3546 		masks &= ~RATE_MCS_ANT_A_MSK;
3547 		link_quality.rate_n_flags[i] =
3548 		    iwk_rate_to_plcp(rate) | masks;
3549 	}
3550 
3551 	link_quality.general_params.single_stream_ant_msk = 2;
3552 	link_quality.general_params.dual_stream_ant_msk = 3;
3553 	link_quality.agg_params.agg_dis_start_th = 3;
3554 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3555 	link_quality.sta_id = IWK_AP_ID;
3556 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3557 	    sizeof (link_quality), 1);
3558 	if (err != IWK_SUCCESS) {
3559 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3560 		    "failed to config link quality table\n");
3561 		return (err);
3562 	}
3563 
3564 	return (IWK_SUCCESS);
3565 }
3566 
3567 /*
3568  * Assemble a scan command and send it to the firmware.
3569  */
3570 static int
3571 iwk_scan(iwk_sc_t *sc)
3572 {
3573 	ieee80211com_t *ic = &sc->sc_ic;
3574 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3575 	iwk_tx_desc_t *desc;
3576 	iwk_tx_data_t *data;
3577 	iwk_cmd_t *cmd;
3578 	iwk_scan_hdr_t *hdr;
3579 	iwk_scan_chan_t *chan;
3580 	struct ieee80211_frame *wh;
3581 	ieee80211_node_t *in = ic->ic_bss;
3582 	uint8_t essid[IEEE80211_NWID_LEN+1];
3583 	struct ieee80211_rateset *rs;
3584 	enum ieee80211_phymode mode;
3585 	uint8_t *frm;
3586 	int i, pktlen, nrates;
3587 
3588 	data = &ring->data[ring->cur];
3589 	desc = data->desc;
3590 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3591 
3592 	cmd->hdr.type = REPLY_SCAN_CMD;
3593 	cmd->hdr.flags = 0;
3594 	cmd->hdr.qid = ring->qid;
3595 	cmd->hdr.idx = ring->cur | 0x40;
3596 
3597 	hdr = (iwk_scan_hdr_t *)cmd->data;
3598 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3599 	hdr->nchan = 1;
3600 	hdr->quiet_time = LE_16(50);
3601 	hdr->quiet_plcp_th = LE_16(1);
3602 
3603 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3604 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3605 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3606 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3607 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3608 
3609 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3610 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3611 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3612 	hdr->tx_cmd.tx_flags |= (0x200);
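	/* send the probe request at 1 Mbps (CCK) on antenna B */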
3613 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3614 	hdr->tx_cmd.rate.r.rate_n_flags |=
3615 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3616 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3617 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3618 
3619 	if (ic->ic_des_esslen) {
3620 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3621 		essid[ic->ic_des_esslen] = '\0';
3622 		IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3623 
3624 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3625 		    ic->ic_des_esslen);
3626 	} else {
3627 		bzero(hdr->direct_scan[0].ssid,
3628 		    sizeof (hdr->direct_scan[0].ssid));
3629 	}
3630 	/*
3631 	 * a probe request frame is required after the REPLY_SCAN_CMD
3632 	 */
3633 	wh = (struct ieee80211_frame *)(hdr + 1);
3634 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3635 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3636 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3637 	(void) memset(wh->i_addr1, 0xff, 6);
3638 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3639 	(void) memset(wh->i_addr3, 0xff, 6);
3640 	*(uint16_t *)&wh->i_dur[0] = 0;
3641 	*(uint16_t *)&wh->i_seq[0] = 0;
3642 
3643 	frm = (uint8_t *)(wh + 1);
3644 
3645 	/* essid IE */
3646 	if (in->in_esslen) {
3647 		bcopy(in->in_essid, essid, in->in_esslen);
3648 		essid[in->in_esslen] = '\0';
3649 		IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3650 		    essid));
3651 	}
3652 	*frm++ = IEEE80211_ELEMID_SSID;
3653 	*frm++ = in->in_esslen;
3654 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3655 	frm += in->in_esslen;
3656 
3657 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3658 	rs = &ic->ic_sup_rates[mode];
3659 
3660 	/* supported rates IE */
3661 	*frm++ = IEEE80211_ELEMID_RATES;
3662 	nrates = rs->ir_nrates;
3663 	if (nrates > IEEE80211_RATE_SIZE)
3664 		nrates = IEEE80211_RATE_SIZE;
3665 	*frm++ = (uint8_t)nrates;
3666 	(void) memcpy(frm, rs->ir_rates, nrates);
3667 	frm += nrates;
3668 
3669 	/* supported xrates IE */
3670 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3671 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3672 		*frm++ = IEEE80211_ELEMID_XRATES;
3673 		*frm++ = (uint8_t)nrates;
3674 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3675 		frm += nrates;
3676 	}
3677 
3678 	/* optional IE (usually for WPA) */
3679 	if (ic->ic_opt_ie != NULL) {
3680 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3681 		frm += ic->ic_opt_ie_len;
3682 	}
3683 
3684 	/* setup length of probe request */
3685 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3686 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3687 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3688 
3689 	/*
3690 	 * the attributes of the scan channels follow the probe
3691 	 * request frame.
3692 	 */
3693 	chan = (iwk_scan_chan_t *)frm;
3694 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3695 		if (ic->ic_des_esslen) {
3696 			chan->type = 3;
3697 		} else {
3698 			chan->type = 1;
3699 		}
3700 
3701 		chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3702 		chan->tpc.tx_gain = 0x3f;
3703 		chan->tpc.dsp_atten = 110;
3704 		chan->active_dwell = LE_16(50);
3705 		chan->passive_dwell = LE_16(120);
3706 
3707 		frm += sizeof (iwk_scan_chan_t);
3708 	}
3709 
3710 	pktlen = _PTRDIFF(frm, cmd);
3711 
3712 	(void) memset(desc, 0, sizeof (*desc));
3713 	desc->val0 = LE_32(1 << 24);
3714 	desc->pa[0].tb1_addr =
3715 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3716 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3717 
3718 	/*
3719 	 * filling the byte count table may not be necessary for commands,
3720 	 * but we fill it here anyway to be safe.
3721 	 */
3722 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3723 	    tfd_offset[ring->cur].val = 8;
3724 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3725 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3726 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3727 	}
3728 
3729 	/* kick cmd ring */
3730 	ring->cur = (ring->cur + 1) % ring->count;
3731 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3732 
3733 	return (IWK_SUCCESS);
3734 }
3735 
3736 static int
3737 iwk_config(iwk_sc_t *sc)
3738 {
3739 	ieee80211com_t *ic = &sc->sc_ic;
3740 	iwk_powertable_cmd_t powertable;
3741 	iwk_bt_cmd_t bt;
3742 	iwk_add_sta_t node;
3743 	iwk_link_quality_cmd_t link_quality;
3744 	int i, err;
3745 	uint16_t masks = 0;
3746 
3747 	/*
3748 	 * set power mode: disable power management for now; enable it later
3749 	 */
3750 	(void) memset(&powertable, 0, sizeof (powertable));
3751 	powertable.flags = LE_16(0x8);
3752 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3753 	    sizeof (powertable), 0);
3754 	if (err != IWK_SUCCESS) {
3755 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3756 		return (err);
3757 	}
3758 
3759 	/* configure bt coexistence */
3760 	(void) memset(&bt, 0, sizeof (bt));
3761 	bt.flags = 3;
3762 	bt.lead_time = 0xaa;
3763 	bt.max_kill = 1;
3764 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3765 	    sizeof (bt), 0);
3766 	if (err != IWK_SUCCESS) {
3767 		cmn_err(CE_WARN,
3768 		    "iwk_config(): "
3769 		    "failed to configure bt coexistence\n");
3770 		return (err);
3771 	}
3772 
3773 	/* configure rxon */
3774 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3775 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3776 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3777 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3778 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3779 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3780 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3781 	switch (ic->ic_opmode) {
3782 	case IEEE80211_M_STA:
3783 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3784 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3785 		    RXON_FILTER_DIS_DECRYPT_MSK |
3786 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3787 		break;
3788 	case IEEE80211_M_IBSS:
3789 	case IEEE80211_M_AHDEMO:
3790 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3791 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3792 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3793 		    RXON_FILTER_DIS_DECRYPT_MSK |
3794 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3795 		break;
3796 	case IEEE80211_M_HOSTAP:
3797 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3798 		break;
3799 	case IEEE80211_M_MONITOR:
3800 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3801 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3802 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3803 		break;
3804 	}
3805 	sc->sc_config.cck_basic_rates  = 0x0f;
3806 	sc->sc_config.ofdm_basic_rates = 0xff;
3807 
3808 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3809 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3810 
3811 	/* set antenna */
3812 
3813 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3814 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3815 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3816 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3817 
3818 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3819 	    sizeof (iwk_rxon_cmd_t), 0);
3820 	if (err != IWK_SUCCESS) {
3821 		cmn_err(CE_WARN, "iwk_config(): "
3822 		    "failed to set configure command\n");
3823 		return (err);
3824 	}
3825 	/* obtain current temperature of chipset */
3826 	sc->sc_tempera = iwk_curr_tempera(sc);
3827 
3828 	/* make Tx power calibration to determine the gains of DSP and radio */
3829 	err = iwk_tx_power_calibration(sc);
3830 	if (err) {
3831 		cmn_err(CE_WARN, "iwk_config(): "
3832 		    "failed to set tx power table\n");
3833 		return (err);
3834 	}
3835 
3836 	/* add broadcast node so that we can send broadcast frame */
3837 	(void) memset(&node, 0, sizeof (node));
3838 	(void) memset(node.bssid, 0xff, 6);
3839 	node.id = IWK_BROADCAST_ID;
3840 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3841 	if (err != IWK_SUCCESS) {
3842 		cmn_err(CE_WARN, "iwk_config(): "
3843 		    "failed to add broadcast node\n");
3844 		return (err);
3845 	}
3846 
3847 	/* TX_LINK_QUALITY cmd ? */
3848 	(void) memset(&link_quality, 0, sizeof (link_quality));
3849 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3850 		masks |= RATE_MCS_CCK_MSK;
3851 		masks |= RATE_MCS_ANT_B_MSK;
3852 		masks &= ~RATE_MCS_ANT_A_MSK;
3853 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3854 	}
3855 
3856 	link_quality.general_params.single_stream_ant_msk = 2;
3857 	link_quality.general_params.dual_stream_ant_msk = 3;
3858 	link_quality.agg_params.agg_dis_start_th = 3;
3859 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3860 	link_quality.sta_id = IWK_BROADCAST_ID;
3861 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3862 	    sizeof (link_quality), 0);
3863 	if (err != IWK_SUCCESS) {
3864 		cmn_err(CE_WARN, "iwk_config(): "
3865 		    "failed to config link quality table\n");
3866 		return (err);
3867 	}
3868 
3869 	return (IWK_SUCCESS);
3870 }
3871 
3872 static void
3873 iwk_stop_master(iwk_sc_t *sc)
3874 {
3875 	uint32_t tmp;
3876 	int n;
3877 
3878 	tmp = IWK_READ(sc, CSR_RESET);
3879 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3880 
3881 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3882 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3883 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3884 		return;
3885 
3886 	for (n = 0; n < 2000; n++) {
3887 		if (IWK_READ(sc, CSR_RESET) &
3888 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3889 			break;
3890 		DELAY(1000);
3891 	}
3892 	if (n == 2000)
3893 		IWK_DBG((IWK_DEBUG_HW,
3894 		    "timeout waiting for master stop\n"));
3895 }
3896 
3897 static int
3898 iwk_power_up(iwk_sc_t *sc)
3899 {
3900 	uint32_t tmp;
3901 
3902 	iwk_mac_access_enter(sc);
3903 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3904 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3905 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3906 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3907 	iwk_mac_access_exit(sc);
3908 
3909 	DELAY(5000);
3910 	return (IWK_SUCCESS);
3911 }
3912 
3913 static int
3914 iwk_preinit(iwk_sc_t *sc)
3915 {
3916 	uint32_t tmp;
3917 	int n;
3918 	uint8_t vlink;
3919 
3920 	/* clear any pending interrupts */
3921 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3922 
3923 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3924 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3925 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3926 
3927 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3928 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3929 
3930 	/* wait for clock ready */
3931 	for (n = 0; n < 1000; n++) {
3932 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3933 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3934 			break;
3935 		DELAY(10);
3936 	}
3937 	if (n == 1000) {
3938 		cmn_err(CE_WARN,
3939 		    "iwk_preinit(): timeout waiting for clock ready\n");
3940 		return (ETIMEDOUT);
3941 	}
3942 	iwk_mac_access_enter(sc);
3943 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3944 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3945 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3946 
3947 	DELAY(20);
3948 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3949 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3950 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3951 	iwk_mac_access_exit(sc);
3952 
3953 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3954 
3955 	(void) iwk_power_up(sc);
3956 
3957 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3958 		tmp = ddi_get32(sc->sc_cfg_handle,
3959 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3960 		ddi_put32(sc->sc_cfg_handle,
3961 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3962 		    tmp & ~(1 << 11));
3963 	}
3964 
3965 
3966 	vlink = ddi_get8(sc->sc_cfg_handle,
3967 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3968 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3969 	    vlink & ~2);
3970 
3971 	tmp = IWK_READ(sc, CSR_SW_VER);
3972 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3973 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3974 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3975 	IWK_WRITE(sc, CSR_SW_VER, tmp);
3976 
3977 	/* make sure power is supplied to each part of the hardware */
3978 	iwk_mac_access_enter(sc);
3979 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3980 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3981 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3982 	DELAY(5);
3983 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3984 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3985 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3986 	iwk_mac_access_exit(sc);
3987 	return (IWK_SUCCESS);
3988 }
3989 
3990 /*
3991  * set up semaphore flag to own the EEPROM
3992  */
3993 static int iwk_eep_sem_down(iwk_sc_t *sc)
3994 {
3995 	int count1, count2;
3996 	uint32_t tmp;
3997 
3998 	for (count1 = 0; count1 < 1000; count1++) {
3999 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4000 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4001 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4002 
4003 		for (count2 = 0; count2 < 2; count2++) {
4004 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
4005 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
4006 				return (IWK_SUCCESS);
4007 			DELAY(10000);
4008 		}
4009 	}
4010 	return (IWK_FAIL);
4011 }
4012 
4013 /*
4014  * reset semaphore flag to release the EEPROM
4015  */
4016 static void iwk_eep_sem_up(iwk_sc_t *sc)
4017 {
4018 	uint32_t tmp;
4019 
4020 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4021 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4022 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4023 }
4024 
4025 /*
4026  * This function loads all information from the EEPROM into the iwk_eep
4027  * structure inside the iwk_sc_t structure
4028  */
4029 static int iwk_eep_load(iwk_sc_t *sc)
4030 {
4031 	int i, rr;
4032 	uint32_t rv, tmp, eep_gp;
4033 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4034 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4035 
4036 	/* read eeprom gp register in CSR */
4037 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
4038 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4039 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
4040 		cmn_err(CE_WARN, "EEPROM not found\n");
4041 		return (IWK_FAIL);
4042 	}
4043 
4044 	rr = iwk_eep_sem_down(sc);
4045 	if (rr != 0) {
4046 		cmn_err(CE_WARN, "failed to own EEPROM\n");
4047 		return (IWK_FAIL);
4048 	}
4049 
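	/*
	 * Read the EEPROM one 16-bit word at a time: write the byte address
	 * (word address << 1) to CSR_EEPROM_REG, poll bit 0 of the register
	 * until the read completes, then take the data from the upper 16 bits.
	 */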
4050 	for (addr = 0; addr < eep_sz; addr += 2) {
4051 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4052 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
4053 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4054 
4055 		for (i = 0; i < 10; i++) {
4056 			rv = IWK_READ(sc, CSR_EEPROM_REG);
4057 			if (rv & 1)
4058 				break;
4059 			DELAY(10);
4060 		}
4061 
4062 		if (!(rv & 1)) {
4063 			cmn_err(CE_WARN, "timed out reading EEPROM\n");
4064 			iwk_eep_sem_up(sc);
4065 			return (IWK_FAIL);
4066 		}
4067 
4068 		eep_p[addr/2] = rv >> 16;
4069 	}
4070 
4071 	iwk_eep_sem_up(sc);
4072 	return (IWK_SUCCESS);
4073 }
4074 
4075 /*
4076  * init mac address in ieee80211com_t struct
4077  */
4078 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
4079 {
4080 	ieee80211com_t *ic = &sc->sc_ic;
4081 	struct iwk_eep *ep = &sc->sc_eep_map;
4082 
4083 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
4084 
4085 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4086 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4087 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4088 }
4089 
4090 static int
4091 iwk_init(iwk_sc_t *sc)
4092 {
4093 	int qid, n, err;
4094 	clock_t clk;
4095 	uint32_t tmp;
4096 
4097 	mutex_enter(&sc->sc_glock);
4098 	sc->sc_flags &= ~IWK_F_FW_INIT;
4099 
4100 	(void) iwk_preinit(sc);
4101 
4102 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
4103 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
4104 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
4105 		goto fail1;
4106 	}
4107 
4108 	/* init Rx ring */
4109 	iwk_mac_access_enter(sc);
4110 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
4111 
4112 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
4113 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
4114 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
4115 
4116 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
4117 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
4118 	    offsetof(struct iwk_shared, val0)) >> 4));
4119 
4120 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
4121 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
4122 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
4123 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
4124 	    (RX_QUEUE_SIZE_LOG <<
4125 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
4126 	iwk_mac_access_exit(sc);
4127 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
4128 	    (RX_QUEUE_SIZE - 1) & ~0x7);
4129 
4130 	/* init Tx rings */
4131 	iwk_mac_access_enter(sc);
4132 	iwk_reg_write(sc, SCD_TXFACT, 0);
4133 
4134 	/* keep warm page */
4135 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
4136 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
4137 
4138 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
4139 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
4140 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
4141 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
4142 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4143 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
4144 	}
4145 	iwk_mac_access_exit(sc);
4146 
4147 	/* clear "radio off" and "disable command" bits */
4148 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4149 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
4150 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4151 
4152 	/* clear any pending interrupts */
4153 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
4154 
4155 	/* enable interrupts */
4156 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
4157 
4158 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4159 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4160 
4161 	/*
4162 	 * back up the ucode data section for future use.
4163 	 */
4164 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4165 	    sc->sc_dma_fw_data.mem_va,
4166 	    sc->sc_dma_fw_data.alength);
4167 
4168 	for (n = 0; n < 2; n++) {
4169 		/* load firmware init segment into NIC */
4170 		err = iwk_load_firmware(sc);
4171 		if (err != IWK_SUCCESS) {
4172 			cmn_err(CE_WARN, "iwk_init(): "
4173 			    "failed to setup boot firmware\n");
4174 			continue;
4175 		}
4176 
4177 		/* now press "execute" to start running */
4178 		IWK_WRITE(sc, CSR_RESET, 0);
4179 		break;
4180 	}
4181 	if (n == 2) {
4182 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
4183 		goto fail1;
4184 	}
4185 	/* ..and wait at most two seconds for the adapter to initialize */
4186 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
4187 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
4188 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
4189 			break;
4190 	}
4191 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
4192 		cmn_err(CE_WARN,
4193 		    "iwk_init(): timeout waiting for firmware init\n");
4194 		goto fail1;
4195 	}
4196 
4197 	/*
4198 	 * at this point the firmware is loaded OK; now configure the hardware
4199 	 * with the ucode API, including rxon, txpower, etc.
4200 	 */
4201 	err = iwk_config(sc);
4202 	if (err) {
4203 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
4204 		goto fail1;
4205 	}
4206 
4207 	/* at this point, hardware may receive beacons :) */
4208 	mutex_exit(&sc->sc_glock);
4209 	return (IWK_SUCCESS);
4210 
4211 fail1:
4212 	err = IWK_FAIL;
4213 	mutex_exit(&sc->sc_glock);
4214 	return (err);
4215 }
4216 
4217 static void
4218 iwk_stop(iwk_sc_t *sc)
4219 {
4220 	uint32_t tmp;
4221 	int i;
4222 
4223 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4224 		mutex_enter(&sc->sc_glock);
4225 
4226 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4227 	/* disable interrupts */
4228 	IWK_WRITE(sc, CSR_INT_MASK, 0);
4229 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4230 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4231 
4232 	/* reset all Tx rings */
4233 	for (i = 0; i < IWK_NUM_QUEUES; i++)
4234 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
4235 
4236 	/* reset Rx ring */
4237 	iwk_reset_rx_ring(sc);
4238 
4239 	iwk_mac_access_enter(sc);
4240 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4241 	iwk_mac_access_exit(sc);
4242 
4243 	DELAY(5);
4244 
4245 	iwk_stop_master(sc);
4246 
4247 	sc->sc_tx_timer = 0;
4248 	sc->sc_flags &= ~IWK_F_SCANNING;
4249 	sc->sc_scan_pending = 0;
4250 
4251 	tmp = IWK_READ(sc, CSR_RESET);
4252 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4253 
4254 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4255 		mutex_exit(&sc->sc_glock);
4256 }
4257 
4258 /*
4259  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4260  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4261  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4262  * INRIA Sophia - Projet Planete
4263  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4264  */
4265 #define	is_success(amrr)	\
4266 	((amrr)->retrycnt < (amrr)->txcnt / 10)
4267 #define	is_failure(amrr)	\
4268 	((amrr)->retrycnt > (amrr)->txcnt / 3)
4269 #define	is_enough(amrr)		\
4270 	((amrr)->txcnt > 100)
4271 #define	is_min_rate(in)		\
4272 	((in)->in_txrate == 0)
4273 #define	is_max_rate(in)		\
4274 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
4275 #define	increase_rate(in)	\
4276 	((in)->in_txrate++)
4277 #define	decrease_rate(in)	\
4278 	((in)->in_txrate--)
4279 #define	reset_cnt(amrr)		\
4280 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
4281 
4282 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
4283 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
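/*
 * With the macros and thresholds above, a sampling period (at least 100
 * transmitted frames) counts as a success when fewer than 10% of the frames
 * needed a retry and as a failure when more than a third did.  After
 * "success_threshold" consecutive successful periods the Tx rate is raised
 * one step; a failed period lowers it one step and resets the counters.
 */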
4284 
4285 static void
4286 iwk_amrr_init(iwk_amrr_t *amrr)
4287 {
4288 	amrr->success = 0;
4289 	amrr->recovery = 0;
4290 	amrr->txcnt = amrr->retrycnt = 0;
4291 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4292 }
4293 
4294 static void
4295 iwk_amrr_timeout(iwk_sc_t *sc)
4296 {
4297 	ieee80211com_t *ic = &sc->sc_ic;
4298 
4299 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
4300 	if (ic->ic_opmode == IEEE80211_M_STA)
4301 		iwk_amrr_ratectl(NULL, ic->ic_bss);
4302 	else
4303 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
4304 	sc->sc_clk = ddi_get_lbolt();
4305 }
4306 
4307 /* ARGSUSED */
4308 static void
4309 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
4310 {
4311 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
4312 	int need_change = 0;
4313 
4314 	if (is_success(amrr) && is_enough(amrr)) {
4315 		amrr->success++;
4316 		if (amrr->success >= amrr->success_threshold &&
4317 		    !is_max_rate(in)) {
4318 			amrr->recovery = 1;
4319 			amrr->success = 0;
4320 			increase_rate(in);
4321 			IWK_DBG((IWK_DEBUG_RATECTL,
4322 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4323 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4324 			need_change = 1;
4325 		} else {
4326 			amrr->recovery = 0;
4327 		}
4328 	} else if (is_failure(amrr)) {
4329 		amrr->success = 0;
4330 		if (!is_min_rate(in)) {
4331 			if (amrr->recovery) {
4332 				amrr->success_threshold++;
4333 				if (amrr->success_threshold >
4334 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4335 					amrr->success_threshold =
4336 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4337 			} else {
4338 				amrr->success_threshold =
4339 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4340 			}
4341 			decrease_rate(in);
4342 			IWK_DBG((IWK_DEBUG_RATECTL,
4343 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4344 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4345 			need_change = 1;
4346 		}
4347 		amrr->recovery = 0;	/* paper is incorrect */
4348 	}
4349 
4350 	if (is_enough(amrr) || need_change)
4351 		reset_cnt(amrr);
4352 }
4353 
4354 /*
4355  * calculate the 4965 chipset's Kelvin temperature from the data in the
4356  * init alive and statistics notifications.
4357  * The details are described in the iwk_calibration.h file
4358  */
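/*
 * In short, with R1-R3 taken from the "initialize alive" notification and
 * R4 from the latest statistics notification (sign-extended from 23 bits),
 * the code below computes:
 *
 *	T(Kelvin) = (TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1))
 *	    * 97 / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET
 */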
4359 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4360 {
4361 	int32_t  tempera;
4362 	int32_t  r1, r2, r3;
4363 	uint32_t  r4_u;
4364 	int32_t   r4_s;
4365 
4366 	if (iwk_is_fat_channel(sc)) {
4367 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4368 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4369 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4370 		r4_u = sc->sc_card_alive_init.therm_r4[1];
4371 	} else {
4372 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4373 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4374 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4375 		r4_u = sc->sc_card_alive_init.therm_r4[0];
4376 	}
4377 
4378 	if (sc->sc_flags & IWK_F_STATISTICS) {
4379 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4380 		    (31-23)) >> (31-23);
4381 	} else {
4382 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4383 	}
4384 
4385 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4386 	    r1, r2, r3, r4_s));
4387 
4388 	if (r3 == r1) {
4389 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
4390 		    "failed to calculate temperature "
4391 		    "because r3 = r1\n");
4392 		return (DDI_FAILURE);
4393 	}
4394 
4395 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4396 	tempera /= (r3 - r1);
4397 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4398 
4399 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4400 	    tempera, KELVIN_TO_CELSIUS(tempera)));
4401 
4402 	return (tempera);
4403 }
4404 
4405 /* Determine whether 4965 is using 2.4 GHz band */
4406 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4407 {
4408 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4409 }
4410 
4411 /* Determine whether 4965 is using fat channel */
4412 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4413 {
4414 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4415 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4416 }
4417 
4418 /*
4419  * In MIMO mode, determine which group the 4965's current channel belongs to.
4420  * For more information about "channel group",
4421  * please refer to the iwk_calibration.h file
4422  */
4423 static int iwk_txpower_grp(uint16_t channel)
4424 {
4425 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4426 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4427 		return (CALIB_CH_GROUP_5);
4428 	}
4429 
4430 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4431 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4432 		return (CALIB_CH_GROUP_1);
4433 	}
4434 
4435 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4436 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4437 		return (CALIB_CH_GROUP_2);
4438 	}
4439 
4440 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4441 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4442 		return (CALIB_CH_GROUP_3);
4443 	}
4444 
4445 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4446 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4447 		return (CALIB_CH_GROUP_4);
4448 	}
4449 
4450 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
4451 	    "can't find txpower group for channel %d.\n", channel);
4452 
4453 	return (DDI_FAILURE);
4454 }
4455 
4456 /* 2.4 GHz */
4457 static uint16_t iwk_eep_band_1[14] = {
4458 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4459 };
4460 
4461 /* 5.2 GHz bands */
4462 static uint16_t iwk_eep_band_2[13] = {
4463 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4464 };
4465 
4466 static uint16_t iwk_eep_band_3[12] = {
4467 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4468 };
4469 
4470 static uint16_t iwk_eep_band_4[11] = {
4471 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4472 };
4473 
4474 static uint16_t iwk_eep_band_5[6] = {
4475 	145, 149, 153, 157, 161, 165
4476 };
4477 
4478 static uint16_t iwk_eep_band_6[7] = {
4479 	1, 2, 3, 4, 5, 6, 7
4480 };
4481 
4482 static uint16_t iwk_eep_band_7[11] = {
4483 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4484 };
4485 
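/*
 * The iwk_eep_band_[1-7] tables above list, in order, the channel numbers
 * whose regulatory data is stored in the corresponding band_*_channels
 * arrays of the EEPROM map; bands 6 and 7 cover the fat (40 MHz) channels
 * in the 2.4 GHz and 5 GHz bands respectively.
 */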
4486 /* Get regulatory data from eeprom for a given channel */
4487 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4488     uint16_t channel,
4489     int is_24G, int is_fat, int is_hi_chan)
4490 {
4491 	int32_t i;
4492 	uint16_t chan;
4493 
4494 	if (is_fat) {  /* 11n mode */
4495 
4496 		if (is_hi_chan) {
4497 			chan = channel - 4;
4498 		} else {
4499 			chan = channel;
4500 		}
4501 
4502 		for (i = 0; i < 7; i++) {
4503 			if (iwk_eep_band_6[i] == chan) {
4504 				return (&sc->sc_eep_map.band_24_channels[i]);
4505 			}
4506 		}
4507 		for (i = 0; i < 11; i++) {
4508 			if (iwk_eep_band_7[i] == chan) {
4509 				return (&sc->sc_eep_map.band_52_channels[i]);
4510 			}
4511 		}
4512 	} else if (is_24G) {  /* 2.4 GHz band */
4513 		for (i = 0; i < 14; i++) {
4514 			if (iwk_eep_band_1[i] == channel) {
4515 				return (&sc->sc_eep_map.band_1_channels[i]);
4516 			}
4517 		}
4518 	} else {  /* 5 GHz band */
4519 		for (i = 0; i < 13; i++) {
4520 			if (iwk_eep_band_2[i] == channel) {
4521 				return (&sc->sc_eep_map.band_2_channels[i]);
4522 			}
4523 		}
4524 		for (i = 0; i < 12; i++) {
4525 			if (iwk_eep_band_3[i] == channel) {
4526 				return (&sc->sc_eep_map.band_3_channels[i]);
4527 			}
4528 		}
4529 		for (i = 0; i < 11; i++) {
4530 			if (iwk_eep_band_4[i] == channel) {
4531 				return (&sc->sc_eep_map.band_4_channels[i]);
4532 			}
4533 		}
4534 		for (i = 0; i < 6; i++) {
4535 			if (iwk_eep_band_5[i] == channel) {
4536 				return (&sc->sc_eep_map.band_5_channels[i]);
4537 			}
4538 		}
4539 	}
4540 
4541 	return (NULL);
4542 }
4543 
4544 /*
4545  * Determine which subband a given channel belongs
4546  * to in 2.4 GHz or 5 GHz band
4547  */
4548 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4549 {
4550 	int32_t b_n = -1;
4551 
4552 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4553 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4554 			continue;
4555 		}
4556 
4557 		if ((channel >=
4558 		    (uint16_t)sc->sc_eep_map.calib_info.
4559 		    band_info_tbl[b_n].ch_from) &&
4560 		    (channel <=
4561 		    (uint16_t)sc->sc_eep_map.calib_info.
4562 		    band_info_tbl[b_n].ch_to)) {
4563 			break;
4564 		}
4565 	}
4566 
4567 	return (b_n);
4568 }
4569 
4570 /* Signed division that rounds to the nearest integer, for interpolation */
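/*
 * For example, iwk_division(7, 2, &res) sets res to 4 and
 * iwk_division(-7, 2, &res) sets res to -4; halves are rounded away
 * from zero.
 */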
4571 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4572 {
4573 	int32_t sign = 1;
4574 
4575 	if (num < 0) {
4576 		sign = -sign;
4577 		num = -num;
4578 	}
4579 
4580 	if (denom < 0) {
4581 		sign = -sign;
4582 		denom = -denom;
4583 	}
4584 
4585 	*res = ((num*2 + denom) / (denom*2)) * sign;
4586 
4587 	return (IWK_SUCCESS);
4588 }
4589 
4590 /* Linear interpolation between two calibration points */
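/*
 * This is the standard linear interpolation between the calibration points
 * (x1, y1) and (x2, y2), using the rounded division above:
 *
 *	y = y2 + (x2 - x) * (y1 - y2) / (x2 - x1)
 */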
4591 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4592     int32_t x2, int32_t y2)
4593 {
4594 	int32_t val;
4595 
4596 	if (x2 == x1) {
4597 		return (y1);
4598 	} else {
4599 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4600 		return (val + y2);
4601 	}
4602 }
4603 
4604 /* Get interpolated measurement data for a given channel for all chains. */
4605 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4606     struct iwk_eep_calib_channel_info *chan_info)
4607 {
4608 	int32_t ban_n;
4609 	uint32_t ch1_n, ch2_n;
4610 	int32_t c, m;
4611 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4612 
4613 	/* determine subband number */
4614 	ban_n = iwk_band_number(sc, channel);
4615 	if (ban_n >= EEP_TX_POWER_BANDS) {
4616 		return (DDI_FAILURE);
4617 	}
4618 
4619 	ch1_n =
4620 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4621 	ch2_n =
4622 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4623 
4624 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4625 
4626 	/*
4627 	 * go through all chains on chipset
4628 	 */
4629 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4630 		/*
4631 		 * go through all factory measurements
4632 		 */
4633 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4634 			m1_p =
4635 			    &(sc->sc_eep_map.calib_info.
4636 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4637 			m2_p =
4638 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4639 			    ch2.measure[c][m]);
4640 			m_p = &(chan_info->measure[c][m]);
4641 
4642 			/*
4643 			 * make interpolation to get actual
4644 			 * Tx power for given channel
4645 			 */
4646 			m_p->actual_pow = iwk_interpolate_value(channel,
4647 			    ch1_n, m1_p->actual_pow,
4648 			    ch2_n, m2_p->actual_pow);
4649 
4650 			/* make interpolation to get index into gain table */
4651 			m_p->gain_idx = iwk_interpolate_value(channel,
4652 			    ch1_n, m1_p->gain_idx,
4653 			    ch2_n, m2_p->gain_idx);
4654 
4655 			/* make interpolation to get chipset temperature */
4656 			m_p->temperature = iwk_interpolate_value(channel,
4657 			    ch1_n, m1_p->temperature,
4658 			    ch2_n, m2_p->temperature);
4659 
4660 			/*
4661 			 * make interpolation to get power
4662 			 * amp detector level
4663 			 */
4664 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4665 			    m1_p->pa_det,
4666 			    ch2_n, m2_p->pa_det);
4667 		}
4668 	}
4669 
4670 	return (IWK_SUCCESS);
4671 }
4672 
4673 /*
4674  * Calculate voltage compensation for Tx power. For more information,
4675  * please refer to the iwk_calibration.h file
4676  */
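/*
 * In summary: the compensation is (current - eeprom voltage) divided by
 * TX_POWER_IWK_VOLTAGE_CODES_PER_03V, doubled when the current voltage is
 * higher than the calibration voltage, and forced to 0 if the result falls
 * outside the range [-2, 2].
 */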
4677 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4678     int32_t curr_voltage)
4679 {
4680 	int32_t vol_comp = 0;
4681 
4682 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4683 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4684 		return (vol_comp);
4685 	}
4686 
4687 	(void) iwk_division(curr_voltage-eep_voltage,
4688 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4689 
4690 	if (curr_voltage > eep_voltage) {
4691 		vol_comp *= 2;
4692 	}
4693 	if ((vol_comp < -2) || (vol_comp > 2)) {
4694 		vol_comp = 0;
4695 	}
4696 
4697 	return (vol_comp);
4698 }
4699 
4700 /*
4701  * Thermal compensation values for txpower for various frequency ranges ...
4702  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4703  */
4704 static struct iwk_txpower_tempera_comp {
4705 	int32_t degrees_per_05db_a;
4706 	int32_t degrees_per_05db_a_denom;
4707 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4708 	{9, 2},			/* group 0 5.2, ch  34-43 */
4709 	{4, 1},			/* group 1 5.2, ch  44-70 */
4710 	{4, 1},			/* group 2 5.2, ch  71-124 */
4711 	{4, 1},			/* group 3 5.2, ch 125-200 */
4712 	{3, 1}			/* group 4 2.4, ch   all */
4713 };
4714 
4715 /*
4716  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4717  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 Mbit/s, and all CCK rates.
4718  */
4719 static int32_t back_off_table[] = {
4720 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4721 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4722 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4723 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4724 	10			/* CCK */
4725 };
4726 
4727 /* determine minimum Tx power index in gain table */
4728 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4729 {
4730 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4731 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4732 	}
4733 
4734 	return (MIN_TX_GAIN_INDEX);
4735 }
4736 
4737 /*
4738  * Determine DSP and radio gain according to temperature and other factors.
4739  * This function does the bulk of the Tx power calibration.
4740  */
4741 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4742     struct iwk_tx_power_db *tp_db)
4743 {
4744 	int is_24G, is_fat, is_high_chan = 0, is_mimo;
4745 	int c, r;
4746 	int32_t target_power;
4747 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4748 	uint16_t channel;
4749 	uint8_t saturation_power;
4750 	int32_t regu_power;
4751 	int32_t curr_regu_power;
4752 	struct iwk_eep_channel *eep_chan_p;
4753 	struct iwk_eep_calib_channel_info eep_chan_calib;
4754 	int32_t eep_voltage, init_voltage;
4755 	int32_t voltage_compensation;
4756 	int32_t temperature;
4757 	int32_t degrees_per_05db_num;
4758 	int32_t degrees_per_05db_denom;
4759 	struct iwk_eep_calib_measure *measure_p;
4760 	int32_t interpo_temp;
4761 	int32_t power_limit;
4762 	int32_t atten_value;
4763 	int32_t tempera_comp[2];
4764 	int32_t interpo_gain_idx[2];
4765 	int32_t interpo_actual_pow[2];
4766 	union iwk_tx_power_dual_stream txpower_gains;
4767 	int32_t txpower_gains_idx;
4768 
4769 	channel = sc->sc_config.chan;
4770 
4771 	/* 2.4 GHz or 5 GHz band */
4772 	is_24G = iwk_is_24G_band(sc);
4773 
4774 	/* fat channel or not */
4775 	is_fat = iwk_is_fat_channel(sc);
4776 
4777 	/*
4778 	 * a fat channel is identified by whether the control channel sits in
4779 	 * its low half or its high half
4780 	 */
4781 	if (is_fat && (sc->sc_config.flags &
4782 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4783 		is_high_chan = 1;
4784 	}
4785 
4786 	if ((channel > 0) && (channel < 200)) {
4787 		/* get regulatory channel data from eeprom */
4788 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4789 		    is_fat, is_high_chan);
4790 		if (NULL == eep_chan_p) {
4791 			cmn_err(CE_WARN,
4792 			    "iwk_txpower_table_cmd_init(): "
4793 			    "can't get channel information\n");
4794 			return (DDI_FAILURE);
4795 		}
4796 	} else {
4797 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4798 		    "channel (%d) isn't in the proper range\n",
4799 		    channel);
4800 		return (DDI_FAILURE);
4801 	}
4802 
4803 	/* initial value of Tx power */
4804 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4805 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4806 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4807 		    "user TX power is too weak\n");
4808 		return (DDI_FAILURE);
4809 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4810 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4811 		    "user TX power is too strong\n");
4812 		return (DDI_FAILURE);
4813 	}
4814 
4815 	target_power = 2 * sc->sc_user_txpower;
4816 
4817 	/* determine which group current channel belongs to */
4818 	tx_grp = iwk_txpower_grp(channel);
4819 	if (tx_grp < 0) {
4820 		return (tx_grp);
4821 	}
4822 
4823 
4824 	if (is_fat) {
4825 		if (is_high_chan) {
4826 			channel -= 2;
4827 		} else {
4828 			channel += 2;
4829 		}
4830 	}
4831 
4832 	/* determine saturation power */
4833 	if (is_24G) {
4834 		saturation_power =
4835 		    sc->sc_eep_map.calib_info.saturation_power24;
4836 	} else {
4837 		saturation_power =
4838 		    sc->sc_eep_map.calib_info.saturation_power52;
4839 	}
4840 
4841 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4842 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4843 		if (is_24G) {
4844 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4845 		} else {
4846 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4847 		}
4848 	}
4849 
4850 	/* determine regulatory power */
4851 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4852 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4853 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4854 		if (is_24G) {
4855 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4856 		} else {
4857 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4858 		}
4859 	}
4860 
4861 	/*
4862 	 * get measurement data for the current channel,
4863 	 * such as temperature, index into the gain table, and actual Tx power
4864 	 */
4865 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4866 
4867 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4868 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4869 
4870 	/* calculate voltage compensation to Tx power */
4871 	voltage_compensation =
4872 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4873 
4874 	if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4875 		temperature = sc->sc_tempera;
4876 	} else {
4877 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4878 	}
4879 	if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4880 		temperature = sc->sc_tempera;
4881 	} else {
4882 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4883 	}
4884 	temperature = KELVIN_TO_CELSIUS(temperature);
4885 
4886 	degrees_per_05db_num =
4887 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4888 	degrees_per_05db_denom =
4889 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4890 
4891 	for (c = 0; c < 2; c++) {  /* go through all chains */
4892 		measure_p = &eep_chan_calib.measure[c][1];
4893 		interpo_temp = measure_p->temperature;
4894 
4895 		/* determine temperature compensation to Tx power */
4896 		(void) iwk_division(
4897 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4898 		    degrees_per_05db_num, &tempera_comp[c]);
4899 
4900 		interpo_gain_idx[c] = measure_p->gain_idx;
4901 		interpo_actual_pow[c] = measure_p->actual_pow;
4902 	}
4903 
4904 	/*
4905 	 * go through all rate entries in Tx power table
4906 	 */
4907 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4908 		if (r & 0x8) {
4909 			/* need to lower regulatory power for MIMO mode */
4910 			curr_regu_power = regu_power -
4911 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4912 			is_mimo = 1;
4913 		} else {
4914 			curr_regu_power = regu_power;
4915 			is_mimo = 0;
4916 		}
4917 
4918 		power_limit = saturation_power - back_off_table[r];
4919 		if (power_limit > curr_regu_power) {
4920 			/* final Tx power limit */
4921 			power_limit = curr_regu_power;
4922 		}
4923 
4924 		if (target_power > power_limit) {
4925 			target_power = power_limit; /* final target Tx power */
4926 		}
4927 
4928 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4929 			if (is_mimo) {
4930 				atten_value =
4931 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4932 			} else {
4933 				atten_value = 0;
4934 			}
4935 
4936 			/*
4937 			 * calculate index into the gain table, adjusting
4938 			 * for power, temperature, voltage and attenuation
4939 			 */
4940 			txpower_gains_idx = interpo_gain_idx[c] -
4941 			    (target_power - interpo_actual_pow[c]) -
4942 			    tempera_comp[c] - voltage_compensation +
4943 			    atten_value;
4944 
4945 			if (txpower_gains_idx <
4946 			    iwk_min_power_index(r, is_24G)) {
4947 				txpower_gains_idx =
4948 				    iwk_min_power_index(r, is_24G);
4949 			}
4950 
4951 			if (!is_24G) {
4952 				/*
4953 				 * support negative index for 5 GHz
4954 				 * band
4955 				 */
4956 				txpower_gains_idx += 9;
4957 			}
4958 
4959 			if (POWER_TABLE_CCK_ENTRY == r) {
4960 				/* for CCK mode, apply the necessary attenuation */
4961 				txpower_gains_idx +=
4962 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4963 			}
4964 
4965 			if (txpower_gains_idx > 107) {
4966 				txpower_gains_idx = 107;
4967 			} else if (txpower_gains_idx < 0) {
4968 				txpower_gains_idx = 0;
4969 			}
4970 
4971 			/* search DSP and radio gains in gain table */
4972 			txpower_gains.s.radio_tx_gain[c] =
4973 			    gains_table[is_24G][txpower_gains_idx].radio;
4974 			txpower_gains.s.dsp_predis_atten[c] =
4975 			    gains_table[is_24G][txpower_gains_idx].dsp;
4976 
4977 			IWK_DBG((IWK_DEBUG_CALIBRATION,
4978 			    "rate_index: %d, "
4979 			    "gain_index %d, c: %d,is_mimo: %d\n",
4980 			    r, txpower_gains_idx, c, is_mimo));
4981 		}
4982 
4983 		/* initialize Tx power table */
4984 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4985 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4986 		} else {
4987 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
4988 		}
4989 	}
4990 
4991 	return (IWK_SUCCESS);
4992 }
4993 
4994 /*
4995  * make Tx power calibration to adjust Tx power.
4996  * This is done by sending out the Tx power table command.
4997  */
4998 static int iwk_tx_power_calibration(iwk_sc_t *sc)
4999 {
5000 	iwk_tx_power_table_cmd_t cmd;
5001 	int rv;
5002 
5003 	if (sc->sc_flags & IWK_F_SCANNING) {
5004 		return (IWK_SUCCESS);
5005 	}
5006 
5007 	/* necessary initialization to Tx power table command */
5008 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
5009 	cmd.channel = sc->sc_config.chan;
5010 	cmd.channel_normal_width = 0;
5011 
5012 	/* initialize Tx power table */
5013 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
5014 	if (rv) {
5015 		cmn_err(CE_NOTE, "iwk_tx_power_calibration(): rv = %d\n", rv);
5016 		return (rv);
5017 	}
5018 
5019 	/* send out Tx power table command */
5020 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
5021 	if (rv) {
5022 		return (rv);
5023 	}
5024 
5025 	/* record current temperature */
5026 	sc->sc_last_tempera = sc->sc_tempera;
5027 
5028 	return (IWK_SUCCESS);
5029 }
5030 
5031 /* This function handles statistics notifications from the uCode */
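/*
 * It copies the new statistics into the softc, runs the Rx gain balance and
 * Rx sensitivity calibrations (unless a scan is in progress), and, when the
 * reported temperature or fat-channel mode changed, recomputes the current
 * temperature and re-runs the Tx power calibration if the temperature moved
 * by 3 degrees or more.
 */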
5032 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
5033 {
5034 	int is_diff;
5035 	struct iwk_notif_statistics *statistics_p =
5036 	    (struct iwk_notif_statistics *)(desc + 1);
5037 
5038 	mutex_enter(&sc->sc_glock);
5039 
5040 	is_diff = (sc->sc_statistics.general.temperature !=
5041 	    statistics_p->general.temperature) ||
5042 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
5043 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
5044 
5045 	/* update statistics data */
5046 	(void) memcpy(&sc->sc_statistics, statistics_p,
5047 	    sizeof (struct iwk_notif_statistics));
5048 
5049 	sc->sc_flags |= IWK_F_STATISTICS;
5050 
5051 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
5052 		/* make Receiver gain balance calibration */
5053 		(void) iwk_rxgain_diff(sc);
5054 
5055 		/* make Receiver sensitivity calibration */
5056 		(void) iwk_rx_sens(sc);
5057 	}
5058 
5059 
5060 	if (!is_diff) {
5061 		mutex_exit(&sc->sc_glock);
5062 		return;
5063 	}
5064 
5065 	/* recalculate the current temperature of the 4965 chipset */
5066 	sc->sc_tempera = iwk_curr_tempera(sc);
5067 
5068 	/* a significant temperature change triggers Tx power calibration */
5069 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
5070 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
5071 		/* make Tx power calibration */
5072 		(void) iwk_tx_power_calibration(sc);
5073 	}
5074 
5075 	mutex_exit(&sc->sc_glock);
5076 }
5077 
5078 /* Determine whether this station is in the associated state or not */
5079 static int iwk_is_associated(iwk_sc_t *sc)
5080 {
5081 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
5082 }
5083 
5084 /* Make necessary preparation for Receiver gain balance calibration */
5085 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
5086 {
5087 	int i, rv;
5088 	struct iwk_calibration_cmd cmd;
5089 	struct iwk_rx_gain_diff *gain_diff_p;
5090 
5091 	gain_diff_p = &sc->sc_rxgain_diff;
5092 
5093 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
5094 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
5095 
5096 	for (i = 0; i < RX_CHAINS_NUM; i++) {
5097 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
5098 	}
5099 
5100 	if (iwk_is_associated(sc)) {
5101 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5102 		cmd.diff_gain_a = 0;
5103 		cmd.diff_gain_b = 0;
5104 		cmd.diff_gain_c = 0;
5105 
5106 		/* assume the gains of all Rx chains can be balanced */
5107 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
5108 		    sizeof (cmd), 1);
5109 		if (rv) {
5110 			return (rv);
5111 		}
5112 
5113 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
5114 	}
5115 
5116 	return (IWK_SUCCESS);
5117 }
5118 
5119 /*
5120  * perform Receiver gain balance calibration to balance the Rx gain between
5121  * Rx chains and determine which chains are disconnected
5122  */
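/*
 * In outline: beacon and silence (noise) RSSI values for each chain are
 * accumulated over BEACON_NUM_20 beacons.  The chain with the strongest
 * average beacon becomes the reference; a chain whose average beacon is more
 * than MAX_ALLOWED_DIFF below it is marked disconnected.  Each remaining
 * chain gets a gain adjustment derived from its noise difference against the
 * quietest connected chain, which is then sent to the firmware with
 * PHY_CALIBRATE_DIFF_GAIN_CMD.
 */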
5123 static int iwk_rxgain_diff(iwk_sc_t *sc)
5124 {
5125 	int i, is_24G, rv;
5126 	int max_beacon_chain_n;
5127 	int min_noise_chain_n;
5128 	uint16_t channel_n;
5129 	int32_t beacon_diff;
5130 	int32_t noise_diff;
5131 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
5132 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
5133 	struct iwk_calibration_cmd cmd;
5134 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5135 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5136 	struct statistics_rx_non_phy *rx_general_p =
5137 	    &sc->sc_statistics.rx.general;
5138 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
5139 
5140 	if (INTERFERENCE_DATA_AVAILABLE !=
5141 	    rx_general_p->interference_data_flag) {
5142 		return (IWK_SUCCESS);
5143 	}
5144 
5145 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
5146 		return (IWK_SUCCESS);
5147 	}
5148 
5149 	is_24G = iwk_is_24G_band(sc);
5150 	channel_n = sc->sc_config.chan;	 /* channel number */
5151 
5152 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
5153 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
5154 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
5155 	    !is_24G)) {
5156 		return (IWK_SUCCESS);
5157 	}
5158 
5159 	/* Rx chain's noise strength from statistics notification */
5160 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
5161 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
5162 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
5163 
5164 	/* Rx chain's beacon strength from statistics notification */
5165 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
5166 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
5167 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
5168 
5169 	gain_diff_p->beacon_count++;
5170 
5171 	/* accumulate chain's noise strength */
5172 	gain_diff_p->noise_stren_a += noise_chain_a;
5173 	gain_diff_p->noise_stren_b += noise_chain_b;
5174 	gain_diff_p->noise_stren_c += noise_chain_c;
5175 
5176 	/* accumulate chain's beacon strength */
5177 	gain_diff_p->beacon_stren_a += beacon_chain_a;
5178 	gain_diff_p->beacon_stren_b += beacon_chain_b;
5179 	gain_diff_p->beacon_stren_c += beacon_chain_c;
5180 
5181 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
5182 		/* calculate average beacon strength */
5183 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
5184 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
5185 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
5186 
5187 		/* calculate average noise strength */
5188 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
5189 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5190 		noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
5191 
5192 		/* determine maximum beacon strength among 3 chains */
5193 		if ((beacon_aver[0] >= beacon_aver[1]) &&
5194 		    (beacon_aver[0] >= beacon_aver[2])) {
5195 			max_beacon_chain_n = 0;
5196 			gain_diff_p->connected_chains = 1 << 0;
5197 		} else if (beacon_aver[1] >= beacon_aver[2]) {
5198 			max_beacon_chain_n = 1;
5199 			gain_diff_p->connected_chains = 1 << 1;
5200 		} else {
5201 			max_beacon_chain_n = 2;
5202 			gain_diff_p->connected_chains = 1 << 2;
5203 		}
5204 
5205 		/* determine which chain is disconnected */
5206 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5207 			if (i != max_beacon_chain_n) {
5208 				beacon_diff = beacon_aver[max_beacon_chain_n] -
5209 				    beacon_aver[i];
5210 				if (beacon_diff > MAX_ALLOWED_DIFF) {
5211 					gain_diff_p->disconnect_chain[i] = 1;
5212 				} else {
5213 					gain_diff_p->connected_chains |=
5214 					    (1 << i);
5215 				}
5216 			}
5217 		}
5218 
5219 		/*
5220 		 * if chain A and B are both disconnected,
5221 		 * assume the one with the stronger beacon signal is connected
5222 		 */
5223 		if (gain_diff_p->disconnect_chain[0] &&
5224 		    gain_diff_p->disconnect_chain[1]) {
5225 			if (beacon_aver[0] >= beacon_aver[1]) {
5226 				gain_diff_p->disconnect_chain[0] = 0;
5227 				gain_diff_p->connected_chains |= (1 << 0);
5228 			} else {
5229 				gain_diff_p->disconnect_chain[1] = 0;
5230 				gain_diff_p->connected_chains |= (1 << 1);
5231 			}
5232 		}
5233 
5234 		/* determine minimum noise strength among 3 chains */
5235 		if (!gain_diff_p->disconnect_chain[0]) {
5236 			min_noise_chain_n = 0;
5237 
5238 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5239 				if (!gain_diff_p->disconnect_chain[i] &&
5240 				    (noise_aver[i] <=
5241 				    noise_aver[min_noise_chain_n])) {
5242 					min_noise_chain_n = i;
5243 				}
5244 
5245 			}
5246 		} else {
5247 			min_noise_chain_n = 1;
5248 
5249 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5250 				if (!gain_diff_p->disconnect_chain[i] &&
5251 				    (noise_aver[i] <=
5252 				    noise_aver[min_noise_chain_n])) {
5253 					min_noise_chain_n = i;
5254 				}
5255 			}
5256 		}
5257 
5258 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
5259 
5260 		/* determine gain difference between chains */
5261 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5262 			if (!gain_diff_p->disconnect_chain[i] &&
5263 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
5264 			    gain_diff_p->gain_diff_chain[i])) {
5265 
5266 				noise_diff = noise_aver[i] -
5267 				    noise_aver[min_noise_chain_n];
5268 				gain_diff_p->gain_diff_chain[i] =
5269 				    (uint8_t)((noise_diff * 10) / 15);
5270 
5271 				if (gain_diff_p->gain_diff_chain[i] > 3) {
5272 					gain_diff_p->gain_diff_chain[i] = 3;
5273 				}
5274 
5275 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
5276 			} else {
5277 				gain_diff_p->gain_diff_chain[i] = 0;
5278 			}
5279 		}
5280 
5281 		if (!gain_diff_p->gain_diff_send) {
5282 			gain_diff_p->gain_diff_send = 1;
5283 
5284 			(void) memset(&cmd, 0, sizeof (cmd));
5285 
5286 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5287 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
5288 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
5289 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
5290 
5291 			/*
5292 			 * send out PHY calibration command to
5293 			 * adjust every chain's Rx gain
5294 			 */
5295 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5296 			    &cmd, sizeof (cmd), 1);
5297 			if (rv) {
5298 				return (rv);
5299 			}
5300 
5301 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
5302 		}
5303 
5304 		gain_diff_p->beacon_stren_a = 0;
5305 		gain_diff_p->beacon_stren_b = 0;
5306 		gain_diff_p->beacon_stren_c = 0;
5307 
5308 		gain_diff_p->noise_stren_a = 0;
5309 		gain_diff_p->noise_stren_b = 0;
5310 		gain_diff_p->noise_stren_c = 0;
5311 	}
5312 
5313 	return (IWK_SUCCESS);
5314 }
5315 
5316 /* Make necessary preparation for Receiver sensitivity calibration */
5317 static int iwk_rx_sens_init(iwk_sc_t *sc)
5318 {
5319 	int i, rv;
5320 	struct iwk_rx_sensitivity_cmd cmd;
5321 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5322 
5323 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5324 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5325 
5326 	rx_sens_p->auto_corr_ofdm_x4 = 90;
5327 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5328 	rx_sens_p->auto_corr_ofdm_x1 = 105;
5329 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5330 
5331 	rx_sens_p->auto_corr_cck_x4 = 125;
5332 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5333 	rx_sens_p->min_energy_det_cck = 100;
5334 
5335 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5336 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5337 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5338 
5339 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5340 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5341 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
5342 	rx_sens_p->last_false_alarm_cnt_cck = 0;
5343 
5344 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5345 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5346 	rx_sens_p->cck_no_false_alarm_num = 0;
5347 	rx_sens_p->cck_beacon_idx = 0;
5348 
5349 	for (i = 0; i < 10; i++) {
5350 		rx_sens_p->cck_beacon_min[i] = 0;
5351 	}
5352 
5353 	rx_sens_p->cck_noise_idx = 0;
5354 	rx_sens_p->cck_noise_ref = 0;
5355 
5356 	for (i = 0; i < 20; i++) {
5357 		rx_sens_p->cck_noise_max[i] = 0;
5358 	}
5359 
5360 	rx_sens_p->cck_noise_diff = 0;
5361 	rx_sens_p->cck_no_false_alarm_num = 0;
5362 
5363 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5364 
5365 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5366 	    rx_sens_p->auto_corr_ofdm_x4;
5367 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5368 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5369 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5370 	    rx_sens_p->auto_corr_ofdm_x1;
5371 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5372 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5373 
5374 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5375 	    rx_sens_p->auto_corr_cck_x4;
5376 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5377 	    rx_sens_p->auto_corr_mrc_cck_x4;
5378 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
5379 
5380 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5381 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5382 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5383 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5384 
5385 	/* at first, set up Rx to maximum sensitivity */
5386 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5387 	if (rv) {
5388 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5389 		    "in the process of initialization, "
5390 		    "failed to send rx sensitivity command\n");
5391 		return (rv);
5392 	}
5393 
5394 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5395 
5396 	return (IWK_SUCCESS);
5397 }
5398 
5399 /*
5400  * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity.
5401  * For more information, please refer to the iwk_calibration.h file
5402  */
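/*
 * The routine below first updates the OFDM and then the CCK thresholds from
 * the accumulated false alarm statistics (iwk_ofdm_sens()/iwk_cck_sens());
 * if either pass changed anything, the new threshold table is pushed to the
 * firmware with SENSITIVITY_CMD.
 */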
5403 static int iwk_rx_sens(iwk_sc_t *sc)
5404 {
5405 	int rv;
5406 	uint32_t actual_rx_time;
5407 	struct statistics_rx_non_phy *rx_general_p =
5408 	    &sc->sc_statistics.rx.general;
5409 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5410 	struct iwk_rx_sensitivity_cmd cmd;
5411 
5412 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5413 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5414 		    "sensitivity initialization has not finished.\n");
5415 		return (DDI_FAILURE);
5416 	}
5417 
5418 	if (INTERFERENCE_DATA_AVAILABLE !=
5419 	    rx_general_p->interference_data_flag) {
5420 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5421 		    "can't perform rx sensitivity calibration "
5422 		    "because of invalid statistics\n");
5423 		return (DDI_FAILURE);
5424 	}
5425 
5426 	actual_rx_time = rx_general_p->channel_load;
5427 	if (!actual_rx_time) {
5428 		IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5429 		    "can't perform rx sensitivity calibration "
5430 		    "because there has not been enough rx time\n"));
5431 		return (DDI_FAILURE);
5432 	}
5433 
5434 	/* make Rx sensitivity calibration for OFDM mode */
5435 	rv = iwk_ofdm_sens(sc, actual_rx_time);
5436 	if (rv) {
5437 		return (rv);
5438 	}
5439 
5440 	/* make Rx sensitivity calibration for CCK mode */
5441 	rv = iwk_cck_sens(sc, actual_rx_time);
5442 	if (rv) {
5443 		return (rv);
5444 	}
5445 
5446 	/*
5447 	 * if neither OFDM nor CCK false alarm counts changed, do nothing
5448 	 */
5449 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5450 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5451 		return (IWK_SUCCESS);
5452 	}
5453 
5454 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5455 
5456 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5457 	    rx_sens_p->auto_corr_ofdm_x4;
5458 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5459 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5460 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5461 	    rx_sens_p->auto_corr_ofdm_x1;
5462 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5463 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5464 
5465 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5466 	    rx_sens_p->auto_corr_cck_x4;
5467 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5468 	    rx_sens_p->auto_corr_mrc_cck_x4;
5469 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5470 	    rx_sens_p->min_energy_det_cck;
5471 
5472 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5473 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5474 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5475 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5476 
5477 	/*
5478 	 * send sensitivity command to complete actual sensitivity calibration
5479 	 */
5480 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5481 	if (rv) {
5482 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5483 		    "failed to send rx sensitivity command\n");
5484 		return (rv);
5485 	}
5486 
5487 	return (IWK_SUCCESS);
5488 
5489 }
5490 
5491 /*
5492  * Perform Rx sensitivity calibration for CCK mode.
5493  * This prepares the parameters for the sensitivity command.
5494  */
5495 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5496 {
5497 	int i;
5498 	uint8_t noise_a, noise_b, noise_c;
5499 	uint8_t max_noise_abc, max_noise_20;
5500 	uint32_t beacon_a, beacon_b, beacon_c;
5501 	uint32_t min_beacon_abc, max_beacon_10;
5502 	uint32_t cck_fa, cck_bp;
5503 	uint32_t cck_sum_fa_bp;
5504 	uint32_t temp;
5505 	struct statistics_rx_non_phy *rx_general_p =
5506 	    &sc->sc_statistics.rx.general;
5507 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5508 
5509 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5510 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5511 
5512 	/* accumulate false alarm */
5513 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5514 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5515 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5516 		cck_fa += (0xFFFFFFFF - temp);
5517 	} else {
5518 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5519 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5520 	}
5521 
5522 	/* accumulate bad plcp */
5523 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5524 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5525 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5526 		cck_bp += (0xFFFFFFFF - temp);
5527 	} else {
5528 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5529 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5530 	}
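	/*
	 * Note on the two accumulations above: the firmware statistics
	 * counters are free-running 32-bit values, so a current reading
	 * smaller than the cached one means the counter wrapped.  The delta
	 * is then recovered as cur + (0xFFFFFFFF - last); otherwise it is
	 * simply cur - last.  In both cases the cached value ends up equal
	 * to the raw reading.  Illustrative example (values chosen for
	 * clarity, not taken from hardware): last = 0xFFFFFFF0, cur = 0x10
	 * yields a delta of 0x1F, one short of the exact wrap distance 0x20.
	 */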
5531 
5532 	/*
5533 	 * scale the counts for comparison against the thresholds below
5534 	 */
5535 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5536 	rx_sens_p->cck_noise_diff = 0;
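	/*
	 * The scaling above keeps the threshold checks below division free:
	 * cck_sum_fa_bp > (50 * actual_rx_time) is equivalent to
	 * (cck_fa + cck_bp) * 200 * 1024 / actual_rx_time > 50, i.e. more
	 * than 50 false alarms per 200 TU (1 TU = 1024 usec) of rx time,
	 * assuming actual_rx_time (channel_load) is reported in microseconds.
	 */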
5537 
5538 	noise_a =
5539 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5540 	noise_b =
5541 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5542 	noise_c =
5543 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5544 
5545 	beacon_a = rx_general_p->beacon_energy_a;
5546 	beacon_b = rx_general_p->beacon_energy_b;
5547 	beacon_c = rx_general_p->beacon_energy_c;
5548 
5549 	/* determine maximum noise among 3 chains */
5550 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5551 		max_noise_abc = noise_a;
5552 	} else if (noise_b >= noise_c) {
5553 		max_noise_abc = noise_b;
5554 	} else {
5555 		max_noise_abc = noise_c;
5556 	}
5557 
5558 	/* record maximum noise among 3 chains */
5559 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5560 	rx_sens_p->cck_noise_idx++;
5561 	if (rx_sens_p->cck_noise_idx >= 20) {
5562 		rx_sens_p->cck_noise_idx = 0;
5563 	}
5564 
5565 	/* determine the maximum among the last 20 recorded noise maxima */
5566 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5567 	for (i = 0; i < 20; i++) {
5568 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5569 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5570 		}
5571 	}
5572 
5573 	/* determine minimum beacon among 3 chains */
5574 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5575 		min_beacon_abc = beacon_a;
5576 	} else if (beacon_b <= beacon_c) {
5577 		min_beacon_abc = beacon_b;
5578 	} else {
5579 		min_beacon_abc = beacon_c;
5580 	}
5581 
5582 	/* record minimum beacon energy among the 3 chains */
5583 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5584 	rx_sens_p->cck_beacon_idx++;
5585 	if (rx_sens_p->cck_beacon_idx >= 10) {
5586 		rx_sens_p->cck_beacon_idx = 0;
5587 	}
5588 
5589 	/* determine the maximum among the last 10 recorded per-chain beacon minima */
5590 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5591 	for (i = 0; i < 10; i++) {
5592 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5593 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5594 		}
5595 	}
5596 
5597 	/* add a little margin */
5598 	max_beacon_10 += 6;
5599 
5600 	/* track the number of consecutive intervals with no false alarms */
5601 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5602 		rx_sens_p->cck_no_false_alarm_num++;
5603 	} else {
5604 		rx_sens_p->cck_no_false_alarm_num = 0;
5605 	}
5606 
5607 	/*
5608 	 * adjust parameters in the sensitivity command
5609 	 * according to the current false alarm rate.
5610 	 * For more information, please refer to the iwk_calibration.h file.
5611 	 */
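	/*
	 * Outline of the branches below: more than 50 false alarms per 200 TU
	 * of rx time is "too many" and the correlation thresholds are raised
	 * to desensitize the receiver; fewer than 5 per 200 TU is "too few"
	 * and the thresholds are lowered again; anything in between is the
	 * desired range, where the noise reference is refreshed.
	 */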
5612 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5613 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5614 
5615 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5616 			rx_sens_p->cck_noise_ref = max_noise_20;
5617 
5618 			if (rx_sens_p->min_energy_det_cck > 2) {
5619 				rx_sens_p->min_energy_det_cck -= 2;
5620 			}
5621 		}
5622 
5623 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5624 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5625 		} else {
5626 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5627 				rx_sens_p->auto_corr_cck_x4 += 3;
5628 			} else {
5629 				rx_sens_p->auto_corr_cck_x4 = 200;
5630 			}
5631 		}
5632 
5633 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5634 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5635 		} else {
5636 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5637 		}
5638 
5639 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5640 
5641 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5642 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5643 
5644 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5645 		    (int32_t)max_noise_20;
5646 
5647 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5648 		    ((rx_sens_p->cck_noise_diff > 2) ||
5649 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5650 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5651 				rx_sens_p->min_energy_det_cck += 2;
5652 			} else {
5653 				rx_sens_p->min_energy_det_cck = 97;
5654 			}
5655 
5656 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5657 				rx_sens_p->auto_corr_cck_x4 -= 3;
5658 			} else {
5659 				rx_sens_p->auto_corr_cck_x4 = 125;
5660 			}
5661 
5662 			if ((rx_sens_p->auto_corr_mrc_cck_x4 - 3) > 200) {
5663 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5664 			} else {
5665 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5666 			}
5667 
5668 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5669 		} else {
5670 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5671 		}
5672 	} else {
5673 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5674 
5675 		rx_sens_p->cck_noise_ref = max_noise_20;
5676 
5677 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5678 			rx_sens_p->min_energy_det_cck -= 8;
5679 		}
5680 
5681 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5682 	}
5683 
5684 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5685 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5686 	}
5687 
5688 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5689 
5690 	return (IWK_SUCCESS);
5691 }
5692 
5693 /*
5694  * Perform Rx sensitivity calibration for OFDM mode.
5695  * This prepares the parameters for the sensitivity command.
5696  */
5697 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5698 {
5699 	uint32_t temp;
5700 	uint16_t temp1;
5701 	uint32_t ofdm_fa, ofdm_bp;
5702 	uint32_t ofdm_sum_fa_bp;
5703 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5704 
5705 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5706 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5707 
5708 	/* accumulate false alarm */
5709 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5710 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5711 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5712 		ofdm_fa += (0xFFFFFFFF - temp);
5713 	} else {
5714 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5715 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5716 	}
5717 
5718 	/* accumulate bad plcp */
5719 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5720 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5721 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5722 		ofdm_bp += (0xFFFFFFFF - temp);
5723 	} else {
5724 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5725 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5726 	}
5727 
5728 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5729 
5730 	/*
5731 	 * adjust sensitivity parameters according to the false alarm rate
5732 	 */
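	/*
	 * Each OFDM auto-correlation threshold is stepped by one per
	 * calibration interval and clamped to its working range:
	 * auto_corr_ofdm_x4 [85, 120], auto_corr_mrc_ofdm_x4 [170, 210],
	 * auto_corr_ofdm_x1 [105, 140], auto_corr_mrc_ofdm_x1 [220, 270].
	 */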
5733 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5734 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5735 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5736 
5737 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5738 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5739 		    (temp1 <= 210) ? temp1 : 210;
5740 
5741 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5742 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5743 
5744 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5745 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5746 		    (temp1 <= 270) ? temp1 : 270;
5747 
5748 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5749 
5750 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5751 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5752 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5753 
5754 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5755 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5756 		    (temp1 >= 170) ? temp1 : 170;
5757 
5758 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5759 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5760 
5761 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5762 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5763 		    (temp1 >= 220) ? temp1 : 220;
5764 
5765 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5766 
5767 	} else {
5768 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5769 	}
5770 
5771 	return (IWK_SUCCESS);
5772 }
5773 
5774 /*
5775  * additional processing of management frames
5776  */
5777 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
5778     struct ieee80211_node *in,
5779     int subtype, int rssi, uint32_t rstamp)
5780 {
5781 	iwk_sc_t *sc = (iwk_sc_t *)ic;
5782 	struct ieee80211_frame *wh;
5783 	uint8_t index1, index2;
5784 	int err;
5785 
5786 	sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp);
5787 
5788 	mutex_enter(&sc->sc_glock);
5789 	switch (subtype) {
5790 	case IEEE80211_FC0_SUBTYPE_BEACON:
5791 		if (sc->sc_ibss.ibss_beacon.syncbeacon && in == ic->ic_bss &&
5792 		    ic->ic_state == IEEE80211_S_RUN) {
5793 			if (ieee80211_beacon_update(ic, in,
5794 			    &sc->sc_ibss.ibss_beacon.iwk_boff,
5795 			    sc->sc_ibss.ibss_beacon.mp, 0)) {
5796 				bcopy(sc->sc_ibss.ibss_beacon.mp->b_rptr,
5797 				    sc->sc_ibss.ibss_beacon.beacon_cmd.
5798 				    bcon_frame,
5799 				    MBLKL(sc->sc_ibss.ibss_beacon.mp));
5800 			}
5801 			err = iwk_cmd(sc, REPLY_TX_BEACON,
5802 			    &sc->sc_ibss.ibss_beacon.beacon_cmd,
5803 			    sc->sc_ibss.ibss_beacon.beacon_cmd_len, 1);
5804 			if (err != IWK_SUCCESS) {
5805 				cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5806 				    "failed to TX beacon.\n");
5807 			}
5808 			sc->sc_ibss.ibss_beacon.syncbeacon = 0;
5809 		}
5810 		if (ic->ic_opmode == IEEE80211_M_IBSS &&
5811 		    ic->ic_state == IEEE80211_S_RUN) {
5812 			wh = (struct ieee80211_frame *)mp->b_rptr;
5813 			mutex_enter(&sc->sc_ibss.node_tb_lock);
5814 			/*
5815 			 * search for node in ibss node table
5816 			 */
5817 			for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
5818 			    index1++) {
5819 				if (sc->sc_ibss.ibss_node_tb[index1].used &&
5820 				    IEEE80211_ADDR_EQ(sc->sc_ibss.
5821 				    ibss_node_tb[index1].node.bssid,
5822 				    wh->i_addr2)) {
5823 					break;
5824 				}
5825 			}
5826 			/*
5827 			 * if the node was not found in the ibss node table
5828 			 */
5829 			if (index1 >= IWK_BROADCAST_ID) {
5830 				err = iwk_clean_add_node_ibss(ic,
5831 				    wh->i_addr2, &index2);
5832 				if (err != IWK_SUCCESS) {
5833 					cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5834 					    "failed to clean all nodes "
5835 					    "and add one node\n");
5836 				}
5837 			}
5838 			mutex_exit(&sc->sc_ibss.node_tb_lock);
5839 		}
5840 		break;
5841 	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
5842 		break;
5843 	}
5844 	mutex_exit(&sc->sc_glock);
5845 }
5846 
5847 /*
5848  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5849  *     a 256-entry history of uCode execution within a circular buffer.
5850  *     Its header format is:
5851  *
5852  *	uint32_t log_size;	log capacity (in number of entries)
5853  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5854  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5855  *	uint32_t write_index;	next circular buffer entry that uCode would fill
5856  *
5857  *     The header is followed by the circular buffer of log entries.  Entries
5858  *     with timestamps have the following format:
5859  *
5860  *	uint32_t event_id;     range 0 - 1500
5861  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5862  *	uint32_t data;         event_id-specific data value
5863  *
5864  *     Entries without timestamps contain only event_id and data.
5865  */
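/*
 * For illustration only (the driver reads these words individually with
 * iwk_mem_read() rather than mapping a structure), the layout described
 * above corresponds to something like:
 *
 *	struct iwk_event_log_hdr {
 *		uint32_t log_size;	/- capacity, in entries
 *		uint32_t type;		/- 1: timestamped entries, 0: not
 *		uint32_t wraps;		/- times the buffer has wrapped
 *		uint32_t write_index;	/- next entry uCode will fill
 *	};
 *
 *	struct iwk_event_log_entry {	/- timestamped form, 3 words
 *		uint32_t event_id;
 *		uint32_t timestamp;
 *		uint32_t data;
 *	};
 */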
5866 
5867 /*
5868  * iwk_write_event_log - Write event log to dmesg
5869  */
5870 static void iwk_write_event_log(iwk_sc_t *sc)
5871 {
5872 	uint32_t log_event_table_ptr;	/* Start address of event table */
5873 	uint32_t startptr;	/* Start address of log data */
5874 	uint32_t logptr;	/* address of log data entry */
5875 	uint32_t i, n, num_events;
5876 	uint32_t event_id, data1, data2; /* log data */
5877 
5878 	uint32_t log_size;   /* log capacity (in number of entries) */
5879 	uint32_t type;	/* (1)timestamp with each entry,(0) no timestamp */
5880 	uint32_t wraps;	/* # times uCode has wrapped to */
5881 			/* the top of circular buffer */
5882 	uint32_t idx; /* index of entry to be filled in next */
5883 
5884 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5885 	if (!(log_event_table_ptr)) {
5886 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5887 		return;
5888 	}
5889 
5890 	iwk_mac_access_enter(sc);
5891 
5892 	/* Read log header */
5893 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5894 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5895 	type = iwk_mem_read(sc, log_event_table_ptr);
5896 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5897 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5898 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5899 	idx = iwk_mem_read(sc, log_event_table_ptr);
5900 	startptr = log_event_table_ptr +
5901 	    sizeof (uint32_t); /* addr of start of log data */
5902 	if (!log_size && !wraps) {
5903 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5904 		iwk_mac_access_exit(sc);
5905 		return;
5906 	}
5907 
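	/*
	 * When the log has wrapped, the oldest entries begin at the current
	 * write index; dump from there to the end of the buffer first, then
	 * the idx entries at the top of the buffer (handled further below).
	 */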
5908 	if (!wraps) {
5909 		num_events = idx;
5910 		logptr = startptr;
5911 	} else {
5912 		num_events = log_size - idx;
5913 		n = type ? 3 : 2;	/* words per log entry */
5914 		logptr = startptr + (idx * n * sizeof (uint32_t));
5915 	}
5916 
5917 	for (i = 0; i < num_events; i++) {
5918 		event_id = iwk_mem_read(sc, logptr);
5919 		logptr += sizeof (uint32_t);
5920 		data1 = iwk_mem_read(sc, logptr);
5921 		logptr += sizeof (uint32_t);
5922 		if (type == 0) { /* no timestamp */
5923 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x",
5924 			    event_id, data1));
5925 		} else { /* timestamp */
5926 			data2 = iwk_mem_read(sc, logptr);
5927 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5928 			    data1, event_id, data2);
5929 			IWK_DBG((IWK_DEBUG_EEPROM,
5930 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5931 			    data1, event_id, data2));
5932 			logptr += sizeof (uint32_t);
5933 		}
5934 	}
5935 
5936 	/*
5937 	 * Print the wrapped around entries, if any
5938 	 */
5939 	if (wraps) {
5940 		logptr = startptr;
5941 		for (i = 0; i < idx; i++) {
5942 			event_id = iwk_mem_read(sc, logptr);
5943 			logptr += sizeof (uint32_t);
5944 			data1 = iwk_mem_read(sc, logptr);
5945 			logptr += sizeof (uint32_t);
5946 			if (type == 0) { /* no timestamp */
5947 				IWK_DBG((IWK_DEBUG_EEPROM,
5948 				    "Event ID=%d, Data=0x%x", event_id, data1));
5949 			} else { /* timestamp */
5950 				data2 = iwk_mem_read(sc, logptr);
5951 				IWK_DBG((IWK_DEBUG_EEPROM,
5952 				    "Time = %d, Event ID=%d, Data=0x%x\n",
5953 				    data1, event_id, data2));
5954 				logptr += sizeof (uint32_t);
5955 			}
5956 		}
5957 	}
5958 
5959 	iwk_mac_access_exit(sc);
5960 }
5961 
5962 /*
5963  * error_event_table_ptr indicates base of the error log.  This contains
5964  * information about any uCode error that occurs.  For 4965, the format is:
5965  *
5966  * uint32_t valid;        (nonzero) valid, (0) log is empty
5967  * uint32_t error_id;     type of error
5968  * uint32_t pc;           program counter
5969  * uint32_t blink1;       branch link
5970  * uint32_t blink2;       branch link
5971  * uint32_t ilink1;       interrupt link
5972  * uint32_t ilink2;       interrupt link
5973  * uint32_t data1;        error-specific data
5974  * uint32_t data2;        error-specific data
5975  * uint32_t line;         source code line of error
5976  * uint32_t bcon_time;    beacon timer
5977  * uint32_t tsf_low;      network timestamp function timer
5978  * uint32_t tsf_hi;       network timestamp function timer
5979  */
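/*
 * The dump routine below simply walks these fields at consecutive 4-byte
 * offsets.  A hypothetical table-driven equivalent (not part of this driver)
 * would look something like:
 *
 *	static const char *err_fields[] = {
 *		"err", "pc", "branch link1", "branch link2",
 *		"interrupt link1", "interrupt link2",
 *		"data1", "data2", "line", "bcon_time", "tsf_low", "tsf_hi"
 *	};
 *	for (i = 0; i < 12; i++, err_ptr += sizeof (uint32_t))
 *		IWK_DBG((IWK_DEBUG_EEPROM, "%s=0x%X ", err_fields[i],
 *		    iwk_mem_read(sc, err_ptr)));
 */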
5980 /*
5981  * iwk_write_error_log - Write error log to dmesg
5982  */
5983 static void iwk_write_error_log(iwk_sc_t *sc)
5984 {
5985 	uint32_t err_ptr;	/* Start address of error log */
5986 	uint32_t valid;		/* is error log valid */
5987 
5988 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
5989 	if (!(err_ptr)) {
5990 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
5991 		return;
5992 	}
5993 
5994 	iwk_mac_access_enter(sc);
5995 
5996 	valid = iwk_mem_read(sc, err_ptr);
5997 	if (!(valid)) {
5998 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
5999 		iwk_mac_access_exit(sc);
6000 		return;
6001 	}
6002 	err_ptr += sizeof (uint32_t);
6003 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
6004 	err_ptr += sizeof (uint32_t);
6005 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
6006 	err_ptr += sizeof (uint32_t);
6007 	IWK_DBG((IWK_DEBUG_EEPROM,
6008 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6009 	err_ptr += sizeof (uint32_t);
6010 	IWK_DBG((IWK_DEBUG_EEPROM,
6011 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6012 	err_ptr += sizeof (uint32_t);
6013 	IWK_DBG((IWK_DEBUG_EEPROM,
6014 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6015 	err_ptr += sizeof (uint32_t);
6016 	IWK_DBG((IWK_DEBUG_EEPROM,
6017 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6018 	err_ptr += sizeof (uint32_t);
6019 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
6020 	err_ptr += sizeof (uint32_t);
6021 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
6022 	err_ptr += sizeof (uint32_t);
6023 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
6024 	err_ptr += sizeof (uint32_t);
6025 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
6026 	err_ptr += sizeof (uint32_t);
6027 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
6028 	err_ptr += sizeof (uint32_t);
6029 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
6030 
6031 	iwk_mac_access_exit(sc);
6032 }
6033 
6034 static int
6035 iwk_run_state_config_ibss(ieee80211com_t *ic)
6036 {
6037 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6038 	ieee80211_node_t *in = ic->ic_bss;
6039 	int i, err = IWK_SUCCESS;
6040 
6041 	mutex_enter(&sc->sc_ibss.node_tb_lock);
6042 
6043 	/*
6044 	 * clean all nodes in the ibss node table to keep it
6045 	 * consistent with the hardware
6046 	 */
6047 	for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6048 		sc->sc_ibss.ibss_node_tb[i].used = 0;
6049 		(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6050 		    0,
6051 		    sizeof (iwk_add_sta_t));
6052 	}
6053 
6054 	sc->sc_ibss.node_number = 0;
6055 
6056 	mutex_exit(&sc->sc_ibss.node_tb_lock);
6057 
6058 	/*
6059 	 * configure RX and TX
6060 	 */
6061 	sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
6062 
6063 	sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6064 	sc->sc_config.filter_flags =
6065 	    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
6066 	    RXON_FILTER_DIS_DECRYPT_MSK |
6067 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
6068 
6069 	sc->sc_config.assoc_id = 0;
6070 
6071 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
6072 	sc->sc_config.chan = ieee80211_chan2ieee(ic,
6073 	    in->in_chan);
6074 
6075 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
6076 		sc->sc_config.cck_basic_rates = 0x03;
6077 		sc->sc_config.ofdm_basic_rates = 0;
6078 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
6079 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
6080 		sc->sc_config.cck_basic_rates = 0;
6081 		sc->sc_config.ofdm_basic_rates = 0x15;
6082 
6083 	} else {
6084 		sc->sc_config.cck_basic_rates = 0x0f;
6085 		sc->sc_config.ofdm_basic_rates = 0xff;
6086 	}
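	/*
	 * The basic-rate bitmaps above follow the usual 4965 RXON layout
	 * (presumed here, not re-verified against iwk_hw.h): CCK bit 0 = 1M,
	 * bit 1 = 2M, bit 2 = 5.5M, bit 3 = 11M, so 0x03 = {1, 2} and
	 * 0x0f = {1, 2, 5.5, 11}; OFDM bit 0 = 6M through bit 7 = 54M,
	 * so 0x15 = {6, 12, 24} (the mandatory OFDM rates) and 0xff = all.
	 */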
6087 
6088 	sc->sc_config.flags &=
6089 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6090 	    RXON_FLG_SHORT_SLOT_MSK);
6091 
6092 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
6093 		sc->sc_config.flags |=
6094 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6095 	}
6096 
6097 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
6098 		sc->sc_config.flags |=
6099 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6100 	}
6101 
6102 	sc->sc_config.filter_flags |=
6103 	    LE_32(RXON_FILTER_ASSOC_MSK);
6104 
6105 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6106 	    sizeof (iwk_rxon_cmd_t), 1);
6107 	if (err != IWK_SUCCESS) {
6108 		cmn_err(CE_WARN, "iwk_run_state_config_ibss(): "
6109 		    "failed to update configuration.\n");
6110 		return (err);
6111 	}
6112 
6113 	return (err);
6114 
6115 }
6116 
6117 static int
6118 iwk_run_state_config_sta(ieee80211com_t *ic)
6119 {
6120 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6121 	ieee80211_node_t *in = ic->ic_bss;
6122 	int err = IWK_SUCCESS;
6123 
6124 	/* update adapter's configuration */
6125 	if (sc->sc_assoc_id != in->in_associd) {
6126 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6127 		    "association ID mismatch: expected %d, "
6128 		    "got %d\n",
6129 		    in->in_associd, sc->sc_assoc_id);
6130 	}
6131 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
6132 
6133 	/*
6134 	 * short preamble/slot time are
6135 	 * negotiated when associating
6136 	 */
6137 	sc->sc_config.flags &=
6138 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6139 	    RXON_FLG_SHORT_SLOT_MSK);
6140 
6141 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
6142 		sc->sc_config.flags |=
6143 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6144 
6145 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6146 		sc->sc_config.flags |=
6147 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6148 
6149 	sc->sc_config.filter_flags |=
6150 	    LE_32(RXON_FILTER_ASSOC_MSK);
6151 
6152 	if (ic->ic_opmode != IEEE80211_M_STA)
6153 		sc->sc_config.filter_flags |=
6154 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
6155 
6156 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
6157 	    " filter_flags %x\n",
6158 	    sc->sc_config.chan, sc->sc_config.flags,
6159 	    sc->sc_config.filter_flags));
6160 
6161 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6162 	    sizeof (iwk_rxon_cmd_t), 1);
6163 	if (err != IWK_SUCCESS) {
6164 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6165 		    "failed to update configuration\n");
6166 		return (err);
6167 	}
6168 
6169 	return (err);
6170 }
6171 
6172 static int
6173 iwk_start_tx_beacon(ieee80211com_t *ic)
6174 {
6175 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6176 	ieee80211_node_t *in = ic->ic_bss;
6177 	int err = IWK_SUCCESS;
6178 	iwk_tx_beacon_cmd_t  *tx_beacon_p;
6179 	uint16_t  masks = 0;
6180 	mblk_t *mp;
6181 	int rate;
6182 
6183 	/*
6184 	 * allocate and transmit beacon frames
6185 	 */
6186 	tx_beacon_p = &sc->sc_ibss.ibss_beacon.beacon_cmd;
6187 
6188 	(void) memset(tx_beacon_p, 0,
6189 	    sizeof (iwk_tx_beacon_cmd_t));
6190 	rate = 0;
6191 	masks = 0;
6192 
6193 	tx_beacon_p->config.sta_id = IWK_BROADCAST_ID;
6194 	tx_beacon_p->config.stop_time.life_time =
6195 	    LE_32(0xffffffff);
6196 
6197 	if (sc->sc_ibss.ibss_beacon.mp != NULL) {
6198 		freemsg(sc->sc_ibss.ibss_beacon.mp);
6199 		sc->sc_ibss.ibss_beacon.mp = NULL;
6200 	}
6201 
6202 	sc->sc_ibss.ibss_beacon.mp =
6203 	    ieee80211_beacon_alloc(ic, in,
6204 	    &sc->sc_ibss.ibss_beacon.iwk_boff);
6205 	if (sc->sc_ibss.ibss_beacon.mp == NULL) {
6206 		cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6207 		    "failed to get beacon frame.\n");
6208 		return (IWK_FAIL);
6209 	}
6210 
6211 	mp = sc->sc_ibss.ibss_beacon.mp;
6212 
6213 	ASSERT(mp->b_cont == NULL);
6214 
6215 	bcopy(mp->b_rptr, tx_beacon_p->bcon_frame, MBLKL(mp));
6216 
6217 	tx_beacon_p->config.len = (uint16_t)(MBLKL(mp));
6218 	sc->sc_ibss.ibss_beacon.beacon_cmd_len =
6219 	    sizeof (iwk_tx_cmd_t) +
6220 	    4 + tx_beacon_p->config.len;
6221 
6222 	/*
6223 	 * beacons are sent at the lowest rate in the rate set (typically 1M)
6224 	 */
6225 	rate = in->in_rates.ir_rates[0];
6226 	rate &= IEEE80211_RATE_VAL;
6227 
6228 	if (2 == rate || 4 == rate || 11 == rate ||
6229 	    22 == rate) {
6230 		masks |= RATE_MCS_CCK_MSK;
6231 	}
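	/*
	 * ir_rates[] values are in 500 kb/s units, so 2, 4, 11 and 22
	 * correspond to the CCK rates 1, 2, 5.5 and 11 Mb/s; only those
	 * need the CCK modulation flag in rate_n_flags.
	 */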
6232 
6233 	masks |= RATE_MCS_ANT_B_MSK;
6234 
6235 	tx_beacon_p->config.rate.r.rate_n_flags =
6236 	    (iwk_rate_to_plcp(rate) | masks);
6237 
6238 
6239 	tx_beacon_p->config.tx_flags =
6240 	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
6241 
6242 	if (ic->ic_bss->in_tstamp.tsf != 0) {
6243 		sc->sc_ibss.ibss_beacon.syncbeacon = 1;
6244 	} else {
6245 		if (ieee80211_beacon_update(ic, in,
6246 		    &sc->sc_ibss.ibss_beacon.iwk_boff,
6247 		    mp, 0)) {
6248 			bcopy(mp->b_rptr,
6249 			    tx_beacon_p->bcon_frame,
6250 			    MBLKL(mp));
6251 		}
6252 
6253 		err = iwk_cmd(sc, REPLY_TX_BEACON,
6254 		    tx_beacon_p,
6255 		    sc->sc_ibss.ibss_beacon.beacon_cmd_len,
6256 		    1);
6257 		if (err != IWK_SUCCESS) {
6258 			cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6259 			    "failed to TX beacon.\n");
6260 			return (err);
6261 		}
6262 
6263 		sc->sc_ibss.ibss_beacon.syncbeacon = 0;
6264 	}
6265 
6266 	return (err);
6267 }
6268 
6269 static int
6270 iwk_clean_add_node_ibss(struct ieee80211com *ic,
6271     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2)
6272 {
6273 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6274 	uint8_t	index;
6275 	iwk_add_sta_t bc_node;
6276 	iwk_link_quality_cmd_t bc_link_quality;
6277 	iwk_link_quality_cmd_t link_quality;
6278 	uint16_t  bc_masks = 0;
6279 	uint16_t  masks = 0;
6280 	int i, rate;
6281 	struct ieee80211_rateset rs;
6282 	iwk_ibss_node_t *ibss_node_p;
6283 	int err = IWK_SUCCESS;
6284 
6285 	/*
6286 	 * find an unused slot in the
6287 	 * ibss node table
6288 	 */
6289 	for (index = IWK_STA_ID;
6290 	    index < IWK_STATION_COUNT; index++) {
6291 		if (!sc->sc_ibss.ibss_node_tb[index].used) {
6292 			break;
6293 		}
6294 	}
6295 
6296 	/*
6297 	 * if there are too many nodes in hardware, clean them up
6298 	 */
6299 	if (index < IWK_BROADCAST_ID &&
6300 	    sc->sc_ibss.node_number >= 25) {
6301 		if (iwk_cmd(sc, REPLY_REMOVE_ALL_STA,
6302 		    NULL, 0, 1) != IWK_SUCCESS) {
6303 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6304 			    "failed to remove all nodes in hardware\n");
6305 			return (IWK_FAIL);
6306 		}
6307 
6308 		for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6309 			sc->sc_ibss.ibss_node_tb[i].used = 0;
6310 			(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6311 			    0, sizeof (iwk_add_sta_t));
6312 		}
6313 
6314 		sc->sc_ibss.node_number = 0;
6315 
6316 		/*
6317 		 * add broadcast node so that we
6318 		 * can send broadcast frame
6319 		 */
6320 		(void) memset(&bc_node, 0, sizeof (bc_node));
6321 		(void) memset(bc_node.bssid, 0xff, 6);
6322 		bc_node.id = IWK_BROADCAST_ID;
6323 
6324 		err = iwk_cmd(sc, REPLY_ADD_STA, &bc_node, sizeof (bc_node), 1);
6325 		if (err != IWK_SUCCESS) {
6326 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6327 			    "failed to add broadcast node\n");
6328 			return (err);
6329 		}
6330 
6331 		/* TX_LINK_QUALITY cmd */
6332 		(void) memset(&bc_link_quality, 0, sizeof (bc_link_quality));
6333 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6334 			bc_masks |= RATE_MCS_CCK_MSK;
6335 			bc_masks |= RATE_MCS_ANT_B_MSK;
6336 			bc_masks &= ~RATE_MCS_ANT_A_MSK;
6337 			bc_link_quality.rate_n_flags[i] =
6338 			    iwk_rate_to_plcp(2) | bc_masks;
6339 		}
6340 
6341 		bc_link_quality.general_params.single_stream_ant_msk = 2;
6342 		bc_link_quality.general_params.dual_stream_ant_msk = 3;
6343 		bc_link_quality.agg_params.agg_dis_start_th = 3;
6344 		bc_link_quality.agg_params.agg_time_limit = LE_16(4000);
6345 		bc_link_quality.sta_id = IWK_BROADCAST_ID;
6346 
6347 		err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6348 		    &bc_link_quality, sizeof (bc_link_quality), 1);
6349 		if (err != IWK_SUCCESS) {
6350 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6351 			    "failed to config link quality table\n");
6352 			return (err);
6353 		}
6354 	}
6355 
6356 	if (index >= IWK_BROADCAST_ID) {
6357 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6358 		    "too many nodes in hardware\n");
6359 		return (IWK_FAIL);
6360 	}
6361 
6362 	/*
6363 	 * add a node into hardware
6364 	 */
6365 	ibss_node_p = &sc->sc_ibss.ibss_node_tb[index];
6366 
6367 	ibss_node_p->used = 1;
6368 
6369 	(void) memset(&ibss_node_p->node, 0,
6370 	    sizeof (iwk_add_sta_t));
6371 
6372 	IEEE80211_ADDR_COPY(ibss_node_p->node.bssid, addr);
6373 	ibss_node_p->node.id = index;
6374 	ibss_node_p->node.control = 0;
6375 	ibss_node_p->node.flags = 0;
6376 
6377 	err = iwk_cmd(sc, REPLY_ADD_STA, &ibss_node_p->node,
6378 	    sizeof (iwk_add_sta_t), 1);
6379 	if (err != IWK_SUCCESS) {
6380 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6381 		    "failed to add IBSS node\n");
6382 		ibss_node_p->used = 0;
6383 		(void) memset(&ibss_node_p->node, 0,
6384 		    sizeof (iwk_add_sta_t));
6385 		return (err);
6386 	}
6387 
6388 	sc->sc_ibss.node_number++;
6389 
6390 	(void) memset(&link_quality, 0, sizeof (link_quality));
6391 
6392 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic,
6393 	    ic->ic_curchan)];
6394 
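	/*
	 * Fill the retry table from the highest rate in the current rate set
	 * downward, then pad any remaining slots with 1M (rate code 2) as the
	 * final fallback.
	 */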
6395 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6396 		if (i < rs.ir_nrates) {
6397 			rate = rs.
6398 			    ir_rates[rs.ir_nrates - 1 - i];
6399 		} else {
6400 			rate = 2;
6401 		}
6402 
6403 		if (2 == rate || 4 == rate ||
6404 		    11 == rate || 22 == rate) {
6405 			masks |= RATE_MCS_CCK_MSK;
6406 		}
6407 
6408 		masks |= RATE_MCS_ANT_B_MSK;
6409 		masks &= ~RATE_MCS_ANT_A_MSK;
6410 
6411 		link_quality.rate_n_flags[i] =
6412 		    iwk_rate_to_plcp(rate) | masks;
6413 	}
6414 
6415 	link_quality.general_params.single_stream_ant_msk = 2;
6416 	link_quality.general_params.dual_stream_ant_msk = 3;
6417 	link_quality.agg_params.agg_dis_start_th = 3;
6418 	link_quality.agg_params.agg_time_limit = LE_16(4000);
6419 	link_quality.sta_id = ibss_node_p->node.id;
6420 
6421 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6422 	    &link_quality, sizeof (link_quality), 1);
6423 	if (err != IWK_SUCCESS) {
6424 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6425 		    "failed to set up TX link quality\n");
6426 		ibss_node_p->used = 0;
6427 		(void) memset(ibss_node_p->node.bssid, 0, 6);
6428 		return (err);
6429 	}
6430 
6431 	*index2 = index;
6432 
6433 	return (err);
6434 }
6435