xref: /titanic_51/usr/src/uts/common/io/iwk/iwk2.c (revision 6b5764c36d253d178caa447fa2a6d7e0c7dfd6e6)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
88 
89 static void	*iwk_soft_state_p = NULL;
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * the maximum number of segments is 4 for the hardware.
177  * now all the wifi drivers put the whole frame in a single
178  * descriptor, so we define the maximum  number of segments 1,
179  * just the same as the rx_buffer. we consider leverage the HW
180  * ability in the future, that is why we don't define rx and tx
181  * buffer_dma_attr as the same.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
197 
198 /* DMA attributes for text and data part in the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 static void	iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
312     struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp);
313 
314 static void	iwk_write_event_log(iwk_sc_t *);
315 static void	iwk_write_error_log(iwk_sc_t *);
316 
317 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
318 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
319 static int	iwk_quiesce(dev_info_t *dip);
320 
321 /*
322  * GLD specific operations
323  */
324 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
325 static int	iwk_m_start(void *arg);
326 static void	iwk_m_stop(void *arg);
327 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
328 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
329 static int	iwk_m_promisc(void *arg, boolean_t on);
330 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
331 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
332 static int	iwk_m_setprop(void *arg, const char *pr_name,
333 	mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
334 static int	iwk_m_getprop(void *arg, const char *pr_name,
335 	mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
336 	void *wldp_buf, uint_t *perm);
337 static void	iwk_destroy_locks(iwk_sc_t *sc);
338 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
339 static void	iwk_thread(iwk_sc_t *sc);
340 static void	iwk_watchdog(void *arg);
341 static int	iwk_run_state_config_ibss(ieee80211com_t *ic);
342 static int	iwk_run_state_config_sta(ieee80211com_t *ic);
343 static int	iwk_fast_recover(iwk_sc_t *sc);
344 static int	iwk_start_tx_beacon(ieee80211com_t *ic);
345 static int	iwk_clean_add_node_ibss(struct ieee80211com *ic,
346     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2);
347 
348 /*
349  * Supported rates for 802.11b/g modes (in 500Kbps unit).
350  * 11a and 11n support will be added later.
351  */
352 static const struct ieee80211_rateset iwk_rateset_11b =
353 	{ 4, { 2, 4, 11, 22 } };
354 
355 static const struct ieee80211_rateset iwk_rateset_11g =
356 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
357 
358 /*
359  * For mfthread only
360  */
361 extern pri_t minclsyspri;
362 
363 #define	DRV_NAME_4965	"iwk"
364 
365 /*
366  * Module Loading Data & Entry Points
367  */
368 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
369     iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
370 
371 static struct modldrv iwk_modldrv = {
372 	&mod_driverops,
373 	"Intel(R) 4965AGN driver(N)",
374 	&iwk_devops
375 };
376 
377 static struct modlinkage iwk_modlinkage = {
378 	MODREV_1,
379 	&iwk_modldrv,
380 	NULL
381 };
382 
383 int
384 _init(void)
385 {
386 	int	status;
387 
388 	status = ddi_soft_state_init(&iwk_soft_state_p,
389 	    sizeof (iwk_sc_t), 1);
390 	if (status != DDI_SUCCESS)
391 		return (status);
392 
393 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
394 	status = mod_install(&iwk_modlinkage);
395 	if (status != DDI_SUCCESS) {
396 		mac_fini_ops(&iwk_devops);
397 		ddi_soft_state_fini(&iwk_soft_state_p);
398 	}
399 
400 	return (status);
401 }
402 
403 int
404 _fini(void)
405 {
406 	int status;
407 
408 	status = mod_remove(&iwk_modlinkage);
409 	if (status == DDI_SUCCESS) {
410 		mac_fini_ops(&iwk_devops);
411 		ddi_soft_state_fini(&iwk_soft_state_p);
412 	}
413 
414 	return (status);
415 }
416 
417 int
418 _info(struct modinfo *mip)
419 {
420 	return (mod_info(&iwk_modlinkage, mip));
421 }
422 
423 /*
424  * Mac Call Back entries
425  */
426 mac_callbacks_t	iwk_m_callbacks = {
427 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
428 	iwk_m_stat,
429 	iwk_m_start,
430 	iwk_m_stop,
431 	iwk_m_promisc,
432 	iwk_m_multicst,
433 	iwk_m_unicst,
434 	iwk_m_tx,
435 	iwk_m_ioctl,
436 	NULL,
437 	NULL,
438 	NULL,
439 	iwk_m_setprop,
440 	iwk_m_getprop
441 };
442 
443 #ifdef DEBUG
444 void
445 iwk_dbg(uint32_t flags, const char *fmt, ...)
446 {
447 	va_list	ap;
448 
449 	if (flags & iwk_dbg_flags) {
450 		va_start(ap, fmt);
451 		vcmn_err(CE_NOTE, fmt, ap);
452 		va_end(ap);
453 	}
454 }
455 #endif
456 
457 /*
458  * device operations
459  */
460 int
461 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
462 {
463 	iwk_sc_t		*sc;
464 	ieee80211com_t	*ic;
465 	int			instance, err, i;
466 	char			strbuf[32];
467 	wifi_data_t		wd = { 0 };
468 	mac_register_t		*macp;
469 
470 	int			intr_type;
471 	int			intr_count;
472 	int			intr_actual;
473 
474 	switch (cmd) {
475 	case DDI_ATTACH:
476 		break;
477 	case DDI_RESUME:
478 		sc = ddi_get_soft_state(iwk_soft_state_p,
479 		    ddi_get_instance(dip));
480 		ASSERT(sc != NULL);
481 
482 		mutex_enter(&sc->sc_glock);
483 		sc->sc_flags &= ~IWK_F_SUSPEND;
484 		mutex_exit(&sc->sc_glock);
485 
486 		if (sc->sc_flags & IWK_F_RUNNING)
487 			(void) iwk_init(sc);
488 
489 		mutex_enter(&sc->sc_glock);
490 		sc->sc_flags |= IWK_F_LAZY_RESUME;
491 		mutex_exit(&sc->sc_glock);
492 
493 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
494 		return (DDI_SUCCESS);
495 	default:
496 		err = DDI_FAILURE;
497 		goto attach_fail1;
498 	}
499 
500 	instance = ddi_get_instance(dip);
501 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
502 	if (err != DDI_SUCCESS) {
503 		cmn_err(CE_WARN,
504 		    "iwk_attach(): failed to allocate soft state\n");
505 		goto attach_fail1;
506 	}
507 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
508 	sc->sc_dip = dip;
509 
510 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
511 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
512 	if (err != DDI_SUCCESS) {
513 		cmn_err(CE_WARN,
514 		    "iwk_attach(): failed to map config spaces regs\n");
515 		goto attach_fail2;
516 	}
517 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
518 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
519 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
520 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
521 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
522 	if (!sc->sc_clsz)
523 		sc->sc_clsz = 16;
524 	sc->sc_clsz = (sc->sc_clsz << 2);
525 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
526 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
527 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
528 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
529 	/*
530 	 * Map operating registers
531 	 */
532 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
533 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
534 	if (err != DDI_SUCCESS) {
535 		cmn_err(CE_WARN,
536 		    "iwk_attach(): failed to map device regs\n");
537 		goto attach_fail2a;
538 	}
539 
540 	err = ddi_intr_get_supported_types(dip, &intr_type);
541 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
542 		cmn_err(CE_WARN, "iwk_attach(): "
543 		    "Fixed type interrupt is not supported\n");
544 		goto attach_fail_intr_a;
545 	}
546 
547 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
548 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
549 		cmn_err(CE_WARN, "iwk_attach(): "
550 		    "No fixed interrupts\n");
551 		goto attach_fail_intr_a;
552 	}
553 
554 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
555 
556 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
557 	    intr_count, &intr_actual, 0);
558 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
559 		cmn_err(CE_WARN, "iwk_attach(): "
560 		    "ddi_intr_alloc() failed 0x%x\n", err);
561 		goto attach_fail_intr_b;
562 	}
563 
564 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
565 	if (err != DDI_SUCCESS) {
566 		cmn_err(CE_WARN, "iwk_attach(): "
567 		    "ddi_intr_get_pri() failed 0x%x\n", err);
568 		goto attach_fail_intr_c;
569 	}
570 
571 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
572 	    DDI_INTR_PRI(sc->sc_intr_pri));
573 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
574 	    DDI_INTR_PRI(sc->sc_intr_pri));
575 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
576 	    DDI_INTR_PRI(sc->sc_intr_pri));
577 	mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER,
578 	    DDI_INTR_PRI(sc->sc_intr_pri));
579 
580 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
581 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
582 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
583 	/*
584 	 * initialize the mfthread
585 	 */
586 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
587 	sc->sc_mf_thread = NULL;
588 	sc->sc_mf_thread_switch = 0;
589 
590 	/*
591 	 * Allocate shared page.
592 	 */
593 	err = iwk_alloc_shared(sc);
594 	if (err != DDI_SUCCESS) {
595 		cmn_err(CE_WARN, "iwk_attach(): "
596 		    "failed to allocate shared page\n");
597 		goto attach_fail3;
598 	}
599 
600 	/*
601 	 * Allocate keep warm page.
602 	 */
603 	err = iwk_alloc_kw(sc);
604 	if (err != DDI_SUCCESS) {
605 		cmn_err(CE_WARN, "iwk_attach(): "
606 		    "failed to allocate keep warm page\n");
607 		goto attach_fail3a;
608 	}
609 
610 	/*
611 	 * Do some necessary hardware initializations.
612 	 */
613 	err = iwk_preinit(sc);
614 	if (err != DDI_SUCCESS) {
615 		cmn_err(CE_WARN, "iwk_attach(): "
616 		    "failed to init hardware\n");
617 		goto attach_fail4;
618 	}
619 
620 	/* initialize EEPROM */
621 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
622 	if (err != 0) {
623 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
624 		goto attach_fail4;
625 	}
626 
627 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
628 		cmn_err(CE_WARN, "older EEPROM detected\n");
629 		goto attach_fail4;
630 	}
631 
632 	iwk_get_mac_from_eep(sc);
633 
634 	err = iwk_ring_init(sc);
635 	if (err != DDI_SUCCESS) {
636 		cmn_err(CE_WARN, "iwk_attach(): "
637 		    "failed to allocate and initialize ring\n");
638 		goto attach_fail4;
639 	}
640 
641 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
642 
643 	err = iwk_alloc_fw_dma(sc);
644 	if (err != DDI_SUCCESS) {
645 		cmn_err(CE_WARN, "iwk_attach(): "
646 		    "failed to allocate firmware dma\n");
647 		goto attach_fail5;
648 	}
649 
650 	/*
651 	 * Initialize the wifi part, which will be used by
652 	 * generic layer
653 	 */
654 	ic = &sc->sc_ic;
655 	ic->ic_phytype  = IEEE80211_T_OFDM;
656 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
657 	ic->ic_state    = IEEE80211_S_INIT;
658 	ic->ic_maxrssi  = 100; /* experimental number */
659 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
660 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
661 	/*
662 	 * use software WEP and TKIP, hardware CCMP;
663 	 */
664 	ic->ic_caps |= IEEE80211_C_AES_CCM;
665 	/*
666 	 * Support WPA/WPA2
667 	 */
668 	ic->ic_caps |= IEEE80211_C_WPA;
669 	/*
670 	 * support Adhoc mode
671 	 */
672 	ic->ic_caps |= IEEE80211_C_IBSS;
673 
674 	/* set supported .11b and .11g rates */
675 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
676 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
677 
678 	/* set supported .11b and .11g channels (1 through 11) */
679 	for (i = 1; i <= 11; i++) {
680 		ic->ic_sup_channels[i].ich_freq =
681 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
682 		ic->ic_sup_channels[i].ich_flags =
683 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
684 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
685 		    IEEE80211_CHAN_PASSIVE;
686 	}
687 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
688 
689 	ic->ic_xmit = iwk_send;
690 	/*
691 	 * init Wifi layer
692 	 */
693 	ieee80211_attach(ic);
694 
695 	/*
696 	 * different instance has different WPA door
697 	 */
698 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
699 	    ddi_driver_name(dip),
700 	    ddi_get_instance(dip));
701 
702 	/*
703 	 * Override 80211 default routines
704 	 */
705 	sc->sc_newstate = ic->ic_newstate;
706 	ic->ic_newstate = iwk_newstate;
707 	ic->ic_watchdog = iwk_watchdog;
708 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
709 	ic->ic_recv_mgmt = iwk_recv_mgmt;
710 	ic->ic_node_alloc = iwk_node_alloc;
711 	ic->ic_node_free = iwk_node_free;
712 	ic->ic_crypto.cs_key_set = iwk_key_set;
713 	ieee80211_media_init(ic);
714 	/*
715 	 * initialize default tx key
716 	 */
717 	ic->ic_def_txkey = 0;
718 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
719 	    iwk_rx_softintr, (caddr_t)sc);
720 	if (err != DDI_SUCCESS) {
721 		cmn_err(CE_WARN, "iwk_attach(): "
722 		    "add soft interrupt failed\n");
723 		goto attach_fail7;
724 	}
725 
726 	/*
727 	 * Add the interrupt handler
728 	 */
729 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
730 	    (caddr_t)sc, NULL);
731 	if (err != DDI_SUCCESS) {
732 		cmn_err(CE_WARN, "iwk_attach(): "
733 		    "ddi_intr_add_handle() failed\n");
734 		goto attach_fail8;
735 	}
736 
737 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
738 	if (err != DDI_SUCCESS) {
739 		cmn_err(CE_WARN, "iwk_attach(): "
740 		    "ddi_intr_enable() failed\n");
741 		goto attach_fail_intr_d;
742 	}
743 
744 	/*
745 	 * Initialize pointer to device specific functions
746 	 */
747 	wd.wd_secalloc = WIFI_SEC_NONE;
748 	wd.wd_opmode = ic->ic_opmode;
749 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
750 
751 	macp = mac_alloc(MAC_VERSION);
752 	if (macp == NULL) {
753 		cmn_err(CE_WARN,
754 		    "iwk_attach(): failed to do mac_alloc()\n");
755 		goto attach_fail9;
756 	}
757 
758 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
759 	macp->m_driver		= sc;
760 	macp->m_dip		= dip;
761 	macp->m_src_addr	= ic->ic_macaddr;
762 	macp->m_callbacks	= &iwk_m_callbacks;
763 	macp->m_min_sdu		= 0;
764 	macp->m_max_sdu		= IEEE80211_MTU;
765 	macp->m_pdata		= &wd;
766 	macp->m_pdata_size	= sizeof (wd);
767 
768 	/*
769 	 * Register the macp to mac
770 	 */
771 	err = mac_register(macp, &ic->ic_mach);
772 	mac_free(macp);
773 	if (err != DDI_SUCCESS) {
774 		cmn_err(CE_WARN,
775 		    "iwk_attach(): failed to do mac_register()\n");
776 		goto attach_fail9;
777 	}
778 
779 	/*
780 	 * Create minor node of type DDI_NT_NET_WIFI
781 	 */
782 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
783 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
784 	    instance + 1, DDI_NT_NET_WIFI, 0);
785 	if (err != DDI_SUCCESS)
786 		cmn_err(CE_WARN,
787 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
788 
789 	/*
790 	 * Notify link is down now
791 	 */
792 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
793 
794 	/*
795 	 * create the mf thread to handle the link status,
796 	 * recovery fatal error, etc.
797 	 */
798 	sc->sc_mf_thread_switch = 1;
799 	if (sc->sc_mf_thread == NULL)
800 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
801 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
802 
803 	sc->sc_flags |= IWK_F_ATTACHED;
804 
805 	return (DDI_SUCCESS);
806 attach_fail9:
807 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
808 attach_fail_intr_d:
809 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
810 
811 attach_fail8:
812 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
813 	sc->sc_soft_hdl = NULL;
814 attach_fail7:
815 	ieee80211_detach(ic);
816 attach_fail6:
817 	iwk_free_fw_dma(sc);
818 attach_fail5:
819 	iwk_ring_free(sc);
820 attach_fail4:
821 	iwk_free_kw(sc);
822 attach_fail3a:
823 	iwk_free_shared(sc);
824 attach_fail3:
825 	iwk_destroy_locks(sc);
826 attach_fail_intr_c:
827 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
828 attach_fail_intr_b:
829 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
830 attach_fail_intr_a:
831 	ddi_regs_map_free(&sc->sc_handle);
832 attach_fail2a:
833 	ddi_regs_map_free(&sc->sc_cfg_handle);
834 attach_fail2:
835 	ddi_soft_state_free(iwk_soft_state_p, instance);
836 attach_fail1:
837 	return (err);
838 }
839 
840 int
841 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
842 {
843 	iwk_sc_t	*sc;
844 	int err;
845 
846 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
847 	ASSERT(sc != NULL);
848 
849 	switch (cmd) {
850 	case DDI_DETACH:
851 		break;
852 	case DDI_SUSPEND:
853 		mutex_enter(&sc->sc_glock);
854 		sc->sc_flags |= IWK_F_SUSPEND;
855 		mutex_exit(&sc->sc_glock);
856 		if (sc->sc_flags & IWK_F_RUNNING) {
857 			iwk_stop(sc);
858 		}
859 
860 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
861 		return (DDI_SUCCESS);
862 	default:
863 		return (DDI_FAILURE);
864 	}
865 
866 	if (!(sc->sc_flags & IWK_F_ATTACHED))
867 		return (DDI_FAILURE);
868 
869 	err = mac_disable(sc->sc_ic.ic_mach);
870 	if (err != DDI_SUCCESS)
871 		return (err);
872 
873 	/*
874 	 * Destroy the mf_thread
875 	 */
876 	mutex_enter(&sc->sc_mt_lock);
877 	sc->sc_mf_thread_switch = 0;
878 	while (sc->sc_mf_thread != NULL) {
879 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
880 			break;
881 	}
882 	mutex_exit(&sc->sc_mt_lock);
883 
884 	iwk_stop(sc);
885 	DELAY(500000);
886 
887 	/*
888 	 * Unregiste from the MAC layer subsystem
889 	 */
890 	(void) mac_unregister(sc->sc_ic.ic_mach);
891 
892 	mutex_enter(&sc->sc_glock);
893 	iwk_free_fw_dma(sc);
894 	iwk_ring_free(sc);
895 	iwk_free_kw(sc);
896 	iwk_free_shared(sc);
897 	mutex_exit(&sc->sc_glock);
898 
899 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
900 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
901 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
902 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
903 
904 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
905 	sc->sc_soft_hdl = NULL;
906 
907 	/*
908 	 * detach ieee80211
909 	 */
910 	ieee80211_detach(&sc->sc_ic);
911 
912 	iwk_destroy_locks(sc);
913 
914 	ddi_regs_map_free(&sc->sc_handle);
915 	ddi_regs_map_free(&sc->sc_cfg_handle);
916 	ddi_remove_minor_node(dip, NULL);
917 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
918 
919 	return (DDI_SUCCESS);
920 }
921 
922 /*
923  * quiesce(9E) entry point.
924  *
925  * This function is called when the system is single-threaded at high
926  * PIL with preemption disabled. Therefore, this function must not be
927  * blocked.
928  *
929  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
930  * DDI_FAILURE indicates an error condition and should almost never happen.
931  */
932 int
933 iwk_quiesce(dev_info_t *dip)
934 {
935 	iwk_sc_t	*sc;
936 
937 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
938 	ASSERT(sc != NULL);
939 
940 	/* no message prints and no lock accquisition */
941 #ifdef DEBUG
942 	iwk_dbg_flags = 0;
943 #endif
944 	sc->sc_flags |= IWK_F_QUIESCED;
945 
946 	iwk_stop(sc);
947 
948 	return (DDI_SUCCESS);
949 }
950 
951 static void
952 iwk_destroy_locks(iwk_sc_t *sc)
953 {
954 	cv_destroy(&sc->sc_mt_cv);
955 	mutex_destroy(&sc->sc_mt_lock);
956 	cv_destroy(&sc->sc_tx_cv);
957 	cv_destroy(&sc->sc_cmd_cv);
958 	cv_destroy(&sc->sc_fw_cv);
959 	mutex_destroy(&sc->sc_tx_lock);
960 	mutex_destroy(&sc->sc_glock);
961 }
962 
963 /*
964  * Allocate an area of memory and a DMA handle for accessing it
965  */
966 static int
967 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
968     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
969     uint_t dma_flags, iwk_dma_t *dma_p)
970 {
971 	caddr_t vaddr;
972 	int err;
973 
974 	/*
975 	 * Allocate handle
976 	 */
977 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
978 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
979 	if (err != DDI_SUCCESS) {
980 		dma_p->dma_hdl = NULL;
981 		return (DDI_FAILURE);
982 	}
983 
984 	/*
985 	 * Allocate memory
986 	 */
987 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
988 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
989 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
990 	if (err != DDI_SUCCESS) {
991 		ddi_dma_free_handle(&dma_p->dma_hdl);
992 		dma_p->dma_hdl = NULL;
993 		dma_p->acc_hdl = NULL;
994 		return (DDI_FAILURE);
995 	}
996 
997 	/*
998 	 * Bind the two together
999 	 */
1000 	dma_p->mem_va = vaddr;
1001 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1002 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1003 	    &dma_p->cookie, &dma_p->ncookies);
1004 	if (err != DDI_DMA_MAPPED) {
1005 		ddi_dma_mem_free(&dma_p->acc_hdl);
1006 		ddi_dma_free_handle(&dma_p->dma_hdl);
1007 		dma_p->acc_hdl = NULL;
1008 		dma_p->dma_hdl = NULL;
1009 		return (DDI_FAILURE);
1010 	}
1011 
1012 	dma_p->nslots = ~0U;
1013 	dma_p->size = ~0U;
1014 	dma_p->token = ~0U;
1015 	dma_p->offset = 0;
1016 	return (DDI_SUCCESS);
1017 }
1018 
1019 /*
1020  * Free one allocated area of DMAable memory
1021  */
1022 static void
1023 iwk_free_dma_mem(iwk_dma_t *dma_p)
1024 {
1025 	if (dma_p->dma_hdl != NULL) {
1026 		if (dma_p->ncookies) {
1027 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1028 			dma_p->ncookies = 0;
1029 		}
1030 		ddi_dma_free_handle(&dma_p->dma_hdl);
1031 		dma_p->dma_hdl = NULL;
1032 	}
1033 
1034 	if (dma_p->acc_hdl != NULL) {
1035 		ddi_dma_mem_free(&dma_p->acc_hdl);
1036 		dma_p->acc_hdl = NULL;
1037 	}
1038 }
1039 
1040 /*
1041  *
1042  */
1043 static int
1044 iwk_alloc_fw_dma(iwk_sc_t *sc)
1045 {
1046 	int err = DDI_SUCCESS;
1047 	iwk_dma_t *dma_p;
1048 	char *t;
1049 
1050 	/*
1051 	 * firmware image layout:
1052 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1053 	 */
1054 	t = (char *)(sc->sc_hdr + 1);
1055 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1056 	    &fw_dma_attr, &iwk_dma_accattr,
1057 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1058 	    &sc->sc_dma_fw_text);
1059 	dma_p = &sc->sc_dma_fw_text;
1060 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1061 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1062 	    dma_p->cookie.dmac_size));
1063 	if (err != DDI_SUCCESS) {
1064 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1065 		    " text dma memory");
1066 		goto fail;
1067 	}
1068 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1069 
1070 	t += LE_32(sc->sc_hdr->textsz);
1071 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1072 	    &fw_dma_attr, &iwk_dma_accattr,
1073 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1074 	    &sc->sc_dma_fw_data);
1075 	dma_p = &sc->sc_dma_fw_data;
1076 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1077 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1078 	    dma_p->cookie.dmac_size));
1079 	if (err != DDI_SUCCESS) {
1080 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1081 		    " data dma memory");
1082 		goto fail;
1083 	}
1084 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1085 
1086 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1087 	    &fw_dma_attr, &iwk_dma_accattr,
1088 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1089 	    &sc->sc_dma_fw_data_bak);
1090 	dma_p = &sc->sc_dma_fw_data_bak;
1091 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1092 	    "size:%lx]\n",
1093 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1094 	    dma_p->cookie.dmac_size));
1095 	if (err != DDI_SUCCESS) {
1096 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1097 		    " data bakeup dma memory");
1098 		goto fail;
1099 	}
1100 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1101 
1102 	t += LE_32(sc->sc_hdr->datasz);
1103 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1104 	    &fw_dma_attr, &iwk_dma_accattr,
1105 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1106 	    &sc->sc_dma_fw_init_text);
1107 	dma_p = &sc->sc_dma_fw_init_text;
1108 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1109 	    "size:%lx]\n",
1110 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1111 	    dma_p->cookie.dmac_size));
1112 	if (err != DDI_SUCCESS) {
1113 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1114 		    "init text dma memory");
1115 		goto fail;
1116 	}
1117 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1118 
1119 	t += LE_32(sc->sc_hdr->init_textsz);
1120 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1121 	    &fw_dma_attr, &iwk_dma_accattr,
1122 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1123 	    &sc->sc_dma_fw_init_data);
1124 	dma_p = &sc->sc_dma_fw_init_data;
1125 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1126 	    "size:%lx]\n",
1127 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1128 	    dma_p->cookie.dmac_size));
1129 	if (err != DDI_SUCCESS) {
1130 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1131 		    "init data dma memory");
1132 		goto fail;
1133 	}
1134 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1135 
1136 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1137 fail:
1138 	return (err);
1139 }
1140 
1141 static void
1142 iwk_free_fw_dma(iwk_sc_t *sc)
1143 {
1144 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1145 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1146 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1147 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1148 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1149 }
1150 
1151 /*
1152  * Allocate a shared page between host and NIC.
1153  */
1154 static int
1155 iwk_alloc_shared(iwk_sc_t *sc)
1156 {
1157 	iwk_dma_t *dma_p;
1158 	int err = DDI_SUCCESS;
1159 
1160 	/* must be aligned on a 4K-page boundary */
1161 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1162 	    &sh_dma_attr, &iwk_dma_accattr,
1163 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1164 	    &sc->sc_dma_sh);
1165 	if (err != DDI_SUCCESS)
1166 		goto fail;
1167 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1168 
1169 	dma_p = &sc->sc_dma_sh;
1170 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1171 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1172 	    dma_p->cookie.dmac_size));
1173 
1174 	return (err);
1175 fail:
1176 	iwk_free_shared(sc);
1177 	return (err);
1178 }
1179 
1180 static void
1181 iwk_free_shared(iwk_sc_t *sc)
1182 {
1183 	iwk_free_dma_mem(&sc->sc_dma_sh);
1184 }
1185 
1186 /*
1187  * Allocate a keep warm page.
1188  */
1189 static int
1190 iwk_alloc_kw(iwk_sc_t *sc)
1191 {
1192 	iwk_dma_t *dma_p;
1193 	int err = DDI_SUCCESS;
1194 
1195 	/* must be aligned on a 4K-page boundary */
1196 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1197 	    &kw_dma_attr, &iwk_dma_accattr,
1198 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1199 	    &sc->sc_dma_kw);
1200 	if (err != DDI_SUCCESS)
1201 		goto fail;
1202 
1203 	dma_p = &sc->sc_dma_kw;
1204 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1205 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1206 	    dma_p->cookie.dmac_size));
1207 
1208 	return (err);
1209 fail:
1210 	iwk_free_kw(sc);
1211 	return (err);
1212 }
1213 
1214 static void
1215 iwk_free_kw(iwk_sc_t *sc)
1216 {
1217 	iwk_free_dma_mem(&sc->sc_dma_kw);
1218 }
1219 
1220 static int
1221 iwk_alloc_rx_ring(iwk_sc_t *sc)
1222 {
1223 	iwk_rx_ring_t *ring;
1224 	iwk_rx_data_t *data;
1225 	iwk_dma_t *dma_p;
1226 	int i, err = DDI_SUCCESS;
1227 
1228 	ring = &sc->sc_rxq;
1229 	ring->cur = 0;
1230 
1231 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1232 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1233 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1234 	    &ring->dma_desc);
1235 	if (err != DDI_SUCCESS) {
1236 		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1237 		goto fail;
1238 	}
1239 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1240 	dma_p = &ring->dma_desc;
1241 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1242 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1243 	    dma_p->cookie.dmac_size));
1244 
1245 	/*
1246 	 * Allocate Rx buffers.
1247 	 */
1248 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1249 		data = &ring->data[i];
1250 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1251 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1252 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1253 		    &data->dma_data);
1254 		if (err != DDI_SUCCESS) {
1255 			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1256 			    "failed\n", i);
1257 			goto fail;
1258 		}
1259 		/*
1260 		 * the physical address bit [8-36] are used,
1261 		 * instead of bit [0-31] in 3945.
1262 		 */
1263 		ring->desc[i] = LE_32((uint32_t)
1264 		    (data->dma_data.cookie.dmac_address >> 8));
1265 	}
1266 	dma_p = &ring->data[0].dma_data;
1267 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1268 	    "size:%lx]\n",
1269 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1270 	    dma_p->cookie.dmac_size));
1271 
1272 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1273 
1274 	return (err);
1275 
1276 fail:
1277 	iwk_free_rx_ring(sc);
1278 	return (err);
1279 }
1280 
1281 static void
1282 iwk_reset_rx_ring(iwk_sc_t *sc)
1283 {
1284 	int n;
1285 
1286 	iwk_mac_access_enter(sc);
1287 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1288 	for (n = 0; n < 2000; n++) {
1289 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1290 			break;
1291 		DELAY(1000);
1292 	}
1293 
1294 	if (n == 2000)
1295 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1296 
1297 	iwk_mac_access_exit(sc);
1298 
1299 	sc->sc_rxq.cur = 0;
1300 }
1301 
1302 static void
1303 iwk_free_rx_ring(iwk_sc_t *sc)
1304 {
1305 	int i;
1306 
1307 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1308 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1309 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1310 			    DDI_DMA_SYNC_FORCPU);
1311 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1312 	}
1313 
1314 	if (sc->sc_rxq.dma_desc.dma_hdl)
1315 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1316 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1317 }
1318 
1319 static int
1320 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1321     int slots, int qid)
1322 {
1323 	iwk_tx_data_t *data;
1324 	iwk_tx_desc_t *desc_h;
1325 	uint32_t paddr_desc_h;
1326 	iwk_cmd_t *cmd_h;
1327 	uint32_t paddr_cmd_h;
1328 	iwk_dma_t *dma_p;
1329 	int i, err = DDI_SUCCESS;
1330 
1331 	ring->qid = qid;
1332 	ring->count = TFD_QUEUE_SIZE_MAX;
1333 	ring->window = slots;
1334 	ring->queued = 0;
1335 	ring->cur = 0;
1336 
1337 	err = iwk_alloc_dma_mem(sc,
1338 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1339 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1340 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1341 	    &ring->dma_desc);
1342 	if (err != DDI_SUCCESS) {
1343 		cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1344 		    "failed\n", qid);
1345 		goto fail;
1346 	}
1347 	dma_p = &ring->dma_desc;
1348 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1349 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1350 	    dma_p->cookie.dmac_size));
1351 
1352 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1353 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1354 
1355 	err = iwk_alloc_dma_mem(sc,
1356 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1357 	    &cmd_dma_attr, &iwk_dma_accattr,
1358 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1359 	    &ring->dma_cmd);
1360 	if (err != DDI_SUCCESS) {
1361 		cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1362 		    "failed\n", qid);
1363 		goto fail;
1364 	}
1365 	dma_p = &ring->dma_cmd;
1366 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1367 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1368 	    dma_p->cookie.dmac_size));
1369 
1370 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1371 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1372 
1373 	/*
1374 	 * Allocate Tx buffers.
1375 	 */
1376 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1377 	    KM_NOSLEEP);
1378 	if (ring->data == NULL) {
1379 		cmn_err(CE_WARN, "could not allocate tx data slots\n");
1380 		goto fail;
1381 	}
1382 
1383 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1384 		data = &ring->data[i];
1385 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1386 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1387 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1388 		    &data->dma_data);
1389 		if (err != DDI_SUCCESS) {
1390 			cmn_err(CE_WARN, "dma alloc tx ring "
1391 			    "buf[%d] failed\n", i);
1392 			goto fail;
1393 		}
1394 
1395 		data->desc = desc_h + i;
1396 		data->paddr_desc = paddr_desc_h +
1397 		    _PTRDIFF(data->desc, desc_h);
1398 		data->cmd = cmd_h +  i; /* (i % slots); */
1399 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1400 		data->paddr_cmd = paddr_cmd_h +
1401 		    _PTRDIFF(data->cmd, cmd_h);
1402 	}
1403 	dma_p = &ring->data[0].dma_data;
1404 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1405 	    "size:%lx]\n",
1406 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1407 	    dma_p->cookie.dmac_size));
1408 
1409 	return (err);
1410 
1411 fail:
1412 	if (ring->data)
1413 		kmem_free(ring->data,
1414 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1415 	iwk_free_tx_ring(sc, ring);
1416 	return (err);
1417 }
1418 
1419 static void
1420 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1421 {
1422 	iwk_tx_data_t *data;
1423 	int i, n;
1424 
1425 	iwk_mac_access_enter(sc);
1426 
1427 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1428 	for (n = 0; n < 200; n++) {
1429 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1430 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1431 			break;
1432 		DELAY(10);
1433 	}
1434 	if (n == 200) {
1435 		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1436 		    ring->qid));
1437 	}
1438 	iwk_mac_access_exit(sc);
1439 
1440 	for (i = 0; i < ring->count; i++) {
1441 		data = &ring->data[i];
1442 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1443 	}
1444 
1445 	ring->queued = 0;
1446 	ring->cur = 0;
1447 }
1448 
1449 /*ARGSUSED*/
1450 static void
1451 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1452 {
1453 	int i;
1454 
1455 	if (ring->dma_desc.dma_hdl != NULL)
1456 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1457 	iwk_free_dma_mem(&ring->dma_desc);
1458 
1459 	if (ring->dma_cmd.dma_hdl != NULL)
1460 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1461 	iwk_free_dma_mem(&ring->dma_cmd);
1462 
1463 	if (ring->data != NULL) {
1464 		for (i = 0; i < ring->count; i++) {
1465 			if (ring->data[i].dma_data.dma_hdl)
1466 				IWK_DMA_SYNC(ring->data[i].dma_data,
1467 				    DDI_DMA_SYNC_FORDEV);
1468 			iwk_free_dma_mem(&ring->data[i].dma_data);
1469 		}
1470 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1471 	}
1472 }
1473 
1474 static int
1475 iwk_ring_init(iwk_sc_t *sc)
1476 {
1477 	int i, err = DDI_SUCCESS;
1478 
1479 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1480 		if (i == IWK_CMD_QUEUE_NUM)
1481 			continue;
1482 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1483 		    i);
1484 		if (err != DDI_SUCCESS)
1485 			goto fail;
1486 	}
1487 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1488 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1489 	if (err != DDI_SUCCESS)
1490 		goto fail;
1491 	err = iwk_alloc_rx_ring(sc);
1492 	if (err != DDI_SUCCESS)
1493 		goto fail;
1494 	return (err);
1495 
1496 fail:
1497 	return (err);
1498 }
1499 
1500 static void
1501 iwk_ring_free(iwk_sc_t *sc)
1502 {
1503 	int i = IWK_NUM_QUEUES;
1504 
1505 	iwk_free_rx_ring(sc);
1506 	while (--i >= 0) {
1507 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1508 	}
1509 }
1510 
1511 /* ARGSUSED */
1512 static ieee80211_node_t *
1513 iwk_node_alloc(ieee80211com_t *ic)
1514 {
1515 	iwk_amrr_t *amrr;
1516 
1517 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1518 	if (amrr != NULL)
1519 		iwk_amrr_init(amrr);
1520 	return (&amrr->in);
1521 }
1522 
1523 static void
1524 iwk_node_free(ieee80211_node_t *in)
1525 {
1526 	ieee80211com_t *ic = in->in_ic;
1527 
1528 	ic->ic_node_cleanup(in);
1529 	if (in->in_wpa_ie != NULL)
1530 		ieee80211_free(in->in_wpa_ie);
1531 	kmem_free(in, sizeof (iwk_amrr_t));
1532 }
1533 
1534 /*ARGSUSED*/
1535 static int
1536 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1537 {
1538 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1539 	ieee80211_node_t *in = ic->ic_bss;
1540 	enum ieee80211_state ostate = ic->ic_state;
1541 	int i, err = IWK_SUCCESS;
1542 
1543 	mutex_enter(&sc->sc_glock);
1544 	switch (nstate) {
1545 	case IEEE80211_S_SCAN:
1546 		switch (ostate) {
1547 		case IEEE80211_S_INIT:
1548 		{
1549 			iwk_add_sta_t node;
1550 
1551 			sc->sc_flags |= IWK_F_SCANNING;
1552 			sc->sc_scan_pending = 0;
1553 			iwk_set_led(sc, 2, 10, 2);
1554 
1555 			/*
1556 			 * clear association to receive beacons from
1557 			 * all BSS'es
1558 			 */
1559 			sc->sc_config.assoc_id = 0;
1560 			sc->sc_config.filter_flags &=
1561 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1562 
1563 			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1564 			    "flags %x filter_flags %x\n", sc->sc_config.chan,
1565 			    sc->sc_config.flags, sc->sc_config.filter_flags));
1566 
1567 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1568 			    sizeof (iwk_rxon_cmd_t), 1);
1569 			if (err != IWK_SUCCESS) {
1570 				cmn_err(CE_WARN,
1571 				    "could not clear association\n");
1572 				sc->sc_flags &= ~IWK_F_SCANNING;
1573 				mutex_exit(&sc->sc_glock);
1574 				return (err);
1575 			}
1576 
1577 			/* add broadcast node to send probe request */
1578 			(void) memset(&node, 0, sizeof (node));
1579 			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1580 			node.id = IWK_BROADCAST_ID;
1581 			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1582 			    sizeof (node), 1);
1583 			if (err != IWK_SUCCESS) {
1584 				cmn_err(CE_WARN, "could not add "
1585 				    "broadcast node\n");
1586 				sc->sc_flags &= ~IWK_F_SCANNING;
1587 				mutex_exit(&sc->sc_glock);
1588 				return (err);
1589 			}
1590 			break;
1591 		}
1592 
1593 		case IEEE80211_S_AUTH:
1594 		case IEEE80211_S_ASSOC:
1595 		case IEEE80211_S_RUN:
1596 			sc->sc_flags |= IWK_F_SCANNING;
1597 			sc->sc_scan_pending = 0;
1598 
1599 			iwk_set_led(sc, 2, 10, 2);
1600 			/* FALLTHRU */
1601 		case IEEE80211_S_SCAN:
1602 			mutex_exit(&sc->sc_glock);
1603 			/* step to next channel before actual FW scan */
1604 			err = sc->sc_newstate(ic, nstate, arg);
1605 			mutex_enter(&sc->sc_glock);
1606 			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1607 				cmn_err(CE_WARN,
1608 				    "could not initiate scan\n");
1609 				sc->sc_flags &= ~IWK_F_SCANNING;
1610 				ieee80211_cancel_scan(ic);
1611 			}
1612 			mutex_exit(&sc->sc_glock);
1613 			return (err);
1614 		default:
1615 			break;
1616 
1617 		}
1618 		sc->sc_clk = 0;
1619 		break;
1620 
1621 	case IEEE80211_S_AUTH:
1622 		if (ostate == IEEE80211_S_SCAN) {
1623 			sc->sc_flags &= ~IWK_F_SCANNING;
1624 		}
1625 
1626 		/* reset state to handle reassociations correctly */
1627 		sc->sc_config.assoc_id = 0;
1628 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1629 
1630 		/*
1631 		 * before sending authentication and association request frame,
1632 		 * we need do something in the hardware, such as setting the
1633 		 * channel same to the target AP...
1634 		 */
1635 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1636 			cmn_err(CE_WARN, "could not setup firmware for "
1637 			    "authentication\n");
1638 			mutex_exit(&sc->sc_glock);
1639 			return (err);
1640 		}
1641 		break;
1642 
1643 	case IEEE80211_S_RUN:
1644 		if (ostate == IEEE80211_S_SCAN) {
1645 			sc->sc_flags &= ~IWK_F_SCANNING;
1646 		}
1647 
1648 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1649 			/* let LED blink when monitoring */
1650 			iwk_set_led(sc, 2, 10, 10);
1651 			break;
1652 		}
1653 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1654 
1655 		/* IBSS mode */
1656 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1657 			/*
1658 			 * clean all nodes in ibss node table
1659 			 * in order to be consistent with hardware
1660 			 */
1661 			err = iwk_run_state_config_ibss(ic);
1662 			if (err != IWK_SUCCESS) {
1663 				cmn_err(CE_WARN, "iwk_newstate(): "
1664 				    "failed to update configuration "
1665 				    "in IBSS mode\n");
1666 				mutex_exit(&sc->sc_glock);
1667 				return (err);
1668 			}
1669 		}
1670 
1671 		/* none IBSS mode */
1672 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1673 			/* update adapter's configuration */
1674 			err = iwk_run_state_config_sta(ic);
1675 			if (err != IWK_SUCCESS) {
1676 				cmn_err(CE_WARN, "iwk_newstate(): "
1677 				    "failed to update configuration "
1678 				    "in none IBSS mode\n");
1679 				mutex_exit(&sc->sc_glock);
1680 				return (err);
1681 			}
1682 		}
1683 
1684 		/* obtain current temperature of chipset */
1685 		sc->sc_tempera = iwk_curr_tempera(sc);
1686 
1687 		/*
1688 		 * make Tx power calibration to determine
1689 		 * the gains of DSP and radio
1690 		 */
1691 		err = iwk_tx_power_calibration(sc);
1692 		if (err) {
1693 			cmn_err(CE_WARN, "iwk_newstate(): "
1694 			    "failed to set tx power table\n");
1695 			mutex_exit(&sc->sc_glock);
1696 			return (err);
1697 		}
1698 
1699 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1700 
1701 			/*
1702 			 * allocate and transmit beacon frames
1703 			 */
1704 			err = iwk_start_tx_beacon(ic);
1705 			if (err != IWK_SUCCESS) {
1706 				cmn_err(CE_WARN, "iwk_newstate(): "
1707 				    "can't transmit beacon frames\n");
1708 				mutex_exit(&sc->sc_glock);
1709 				return (err);
1710 			}
1711 		}
1712 
1713 		/* start automatic rate control */
1714 		mutex_enter(&sc->sc_mt_lock);
1715 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1716 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1717 			/* set rate to some reasonable initial value */
1718 			i = in->in_rates.ir_nrates - 1;
1719 			while (i > 0 && IEEE80211_RATE(i) > 72)
1720 				i--;
1721 			in->in_txrate = i;
1722 		} else {
1723 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1724 		}
1725 		mutex_exit(&sc->sc_mt_lock);
1726 
1727 		/* set LED on after associated */
1728 		iwk_set_led(sc, 2, 0, 1);
1729 		break;
1730 
1731 	case IEEE80211_S_INIT:
1732 		if (ostate == IEEE80211_S_SCAN) {
1733 			sc->sc_flags &= ~IWK_F_SCANNING;
1734 		}
1735 
1736 		/* set LED off after init */
1737 		iwk_set_led(sc, 2, 1, 0);
1738 		break;
1739 	case IEEE80211_S_ASSOC:
1740 		if (ostate == IEEE80211_S_SCAN) {
1741 			sc->sc_flags &= ~IWK_F_SCANNING;
1742 		}
1743 
1744 		break;
1745 	}
1746 
1747 	mutex_exit(&sc->sc_glock);
1748 
1749 	err = sc->sc_newstate(ic, nstate, arg);
1750 
1751 	if (nstate == IEEE80211_S_RUN) {
1752 
1753 		mutex_enter(&sc->sc_glock);
1754 
1755 		/*
1756 		 * make initialization for Receiver
1757 		 * sensitivity calibration
1758 		 */
1759 		err = iwk_rx_sens_init(sc);
1760 		if (err) {
1761 			cmn_err(CE_WARN, "iwk_newstate(): "
1762 			    "failed to init RX sensitivity\n");
1763 			mutex_exit(&sc->sc_glock);
1764 			return (err);
1765 		}
1766 
1767 		/* make initialization for Receiver gain balance */
1768 		err = iwk_rxgain_diff_init(sc);
1769 		if (err) {
1770 			cmn_err(CE_WARN, "iwk_newstate(): "
1771 			    "failed to init phy calibration\n");
1772 			mutex_exit(&sc->sc_glock);
1773 			return (err);
1774 		}
1775 
1776 		mutex_exit(&sc->sc_glock);
1777 
1778 	}
1779 
1780 	return (err);
1781 }
1782 
1783 static void
1784 iwk_watchdog(void *arg)
1785 {
1786 	iwk_sc_t *sc = arg;
1787 	struct ieee80211com *ic = &sc->sc_ic;
1788 #ifdef DEBUG
1789 	timeout_id_t timeout_id = ic->ic_watchdog_timer;
1790 #endif
1791 
1792 	ieee80211_stop_watchdog(ic);
1793 
1794 	if ((ic->ic_state != IEEE80211_S_AUTH) &&
1795 	    (ic->ic_state != IEEE80211_S_ASSOC))
1796 		return;
1797 
1798 	if (ic->ic_bss->in_fails > 0) {
1799 		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) reset: "
1800 		    "node (0x%x)\n", timeout_id, &ic->ic_bss));
1801 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1802 	} else {
1803 		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) timeout: "
1804 		    "node (0x%x), retry (%d)\n",
1805 		    timeout_id, &ic->ic_bss, ic->ic_bss->in_fails + 1));
1806 		ieee80211_watchdog(ic);
1807 	}
1808 }
1809 
1810 /*ARGSUSED*/
1811 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1812     const uint8_t mac[IEEE80211_ADDR_LEN])
1813 {
1814 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1815 	iwk_add_sta_t node;
1816 	int err;
1817 	uint8_t index1;
1818 
1819 	switch (k->wk_cipher->ic_cipher) {
1820 	case IEEE80211_CIPHER_WEP:
1821 	case IEEE80211_CIPHER_TKIP:
1822 		return (1); /* sofeware do it. */
1823 	case IEEE80211_CIPHER_AES_CCM:
1824 		break;
1825 	default:
1826 		return (0);
1827 	}
1828 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1829 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1830 
1831 	mutex_enter(&sc->sc_glock);
1832 
1833 	/* update ap/multicast node */
1834 	(void) memset(&node, 0, sizeof (node));
1835 	if (IEEE80211_IS_MULTICAST(mac)) {
1836 		(void) memset(node.bssid, 0xff, 6);
1837 		node.id = IWK_BROADCAST_ID;
1838 	} else if (ic->ic_opmode == IEEE80211_M_IBSS) {
1839 		mutex_exit(&sc->sc_glock);
1840 		mutex_enter(&sc->sc_ibss.node_tb_lock);
1841 
1842 		/*
1843 		 * search for node in ibss node table
1844 		 */
1845 		for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
1846 		    index1++) {
1847 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
1848 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
1849 			    ibss_node_tb[index1].node.bssid,
1850 			    mac)) {
1851 				break;
1852 			}
1853 		}
1854 		if (index1 >= IWK_BROADCAST_ID) {
1855 			cmn_err(CE_WARN, "iwk_key_set(): "
1856 			    "have no this node in hardware node table\n");
1857 			mutex_exit(&sc->sc_ibss.node_tb_lock);
1858 			return (0);
1859 		} else {
1860 			/*
1861 			 * configure key for given node in hardware
1862 			 */
1863 			if (k->wk_flags & IEEE80211_KEY_XMIT) {
1864 				sc->sc_ibss.ibss_node_tb[index1].
1865 				    node.key_flags = 0;
1866 				sc->sc_ibss.ibss_node_tb[index1].
1867 				    node.keyp = k->wk_keyix;
1868 			} else {
1869 				sc->sc_ibss.ibss_node_tb[index1].
1870 				    node.key_flags = (1 << 14);
1871 				sc->sc_ibss.ibss_node_tb[index1].
1872 				    node.keyp = k->wk_keyix + 4;
1873 			}
1874 
1875 			(void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
1876 			    k->wk_key, k->wk_keylen);
1877 			sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
1878 			    (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1879 			sc->sc_ibss.ibss_node_tb[index1].node.sta_mask =
1880 			    STA_MODIFY_KEY_MASK;
1881 			sc->sc_ibss.ibss_node_tb[index1].node.control = 1;
1882 
1883 			mutex_enter(&sc->sc_glock);
1884 			err = iwk_cmd(sc, REPLY_ADD_STA,
1885 			    &sc->sc_ibss.ibss_node_tb[index1].node,
1886 			    sizeof (iwk_add_sta_t), 1);
1887 			if (err != IWK_SUCCESS) {
1888 				cmn_err(CE_WARN, "iwk_key_set(): "
1889 				    "failed to update IBSS node in hardware\n");
1890 				mutex_exit(&sc->sc_glock);
1891 				mutex_exit(&sc->sc_ibss.node_tb_lock);
1892 				return (0);
1893 			}
1894 			mutex_exit(&sc->sc_glock);
1895 		}
1896 		mutex_exit(&sc->sc_ibss.node_tb_lock);
1897 		return (1);
1898 	} else {
1899 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1900 		node.id = IWK_AP_ID;
1901 	}
1902 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1903 		node.key_flags = 0;
1904 		node.keyp = k->wk_keyix;
1905 	} else {
1906 		node.key_flags = (1 << 14);
1907 		node.keyp = k->wk_keyix + 4;
1908 	}
1909 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1910 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1911 	node.sta_mask = STA_MODIFY_KEY_MASK;
1912 	node.control = 1;
1913 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1914 	if (err != IWK_SUCCESS) {
1915 		cmn_err(CE_WARN, "iwk_key_set():"
1916 		    "failed to update ap node\n");
1917 		mutex_exit(&sc->sc_glock);
1918 		return (0);
1919 	}
1920 	mutex_exit(&sc->sc_glock);
1921 	return (1);
1922 }
1923 
1924 /*
1925  * exclusive access to mac begin.
1926  */
1927 static void
1928 iwk_mac_access_enter(iwk_sc_t *sc)
1929 {
1930 	uint32_t tmp;
1931 	int n;
1932 
1933 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1934 	IWK_WRITE(sc, CSR_GP_CNTRL,
1935 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1936 
1937 	/* wait until we succeed */
1938 	for (n = 0; n < 1000; n++) {
1939 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1940 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1941 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1942 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1943 			break;
1944 		DELAY(10);
1945 	}
1946 	if (n == 1000)
1947 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1948 }
1949 
1950 /*
1951  * exclusive access to mac end.
1952  */
1953 static void
1954 iwk_mac_access_exit(iwk_sc_t *sc)
1955 {
1956 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1957 	IWK_WRITE(sc, CSR_GP_CNTRL,
1958 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1959 }
1960 
1961 static uint32_t
1962 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1963 {
1964 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1965 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1966 }
1967 
1968 static void
1969 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1970 {
1971 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1972 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1973 }
1974 
1975 static uint32_t
1976 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1977 {
1978 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1979 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1980 }
1981 
1982 static void
1983 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1984 {
1985 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1986 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1987 }
1988 
1989 static void
1990 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1991     uint32_t *data, int wlen)
1992 {
1993 	for (; wlen > 0; wlen--, data++, addr += 4)
1994 		iwk_reg_write(sc, addr, *data);
1995 }
1996 
1997 
1998 /*
1999  * ucode load/initialization steps:
2000  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
2001  * BSM contains a small memory that *always* stays powered up, so it can
2002  * retain the bootstrap program even when the card is in a power-saving
2003  * power-down state.  The BSM loads the small program into ARC processor's
2004  * instruction memory when triggered by power-up.
2005  * 2)  load Initialize image via bootstrap program.
2006  * The Initialize image sets up regulatory and calibration data for the
2007  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
2008  * The 4965 reply contains calibration data for temperature, voltage and tx gain
2009  * correction.
2010  */
2011 static int
2012 iwk_load_firmware(iwk_sc_t *sc)
2013 {
2014 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
2015 	uint32_t size = sc->sc_hdr->bootsz;
2016 	int n, err = IWK_SUCCESS;
2017 
2018 	/*
2019 	 * The physical address bit [4-35] of the initialize uCode.
2020 	 * In the initialize alive notify interrupt the physical address of
2021 	 * the runtime ucode will be set for loading.
2022 	 */
2023 	iwk_mac_access_enter(sc);
2024 
2025 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2026 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
2027 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2028 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
2029 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2030 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
2031 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2032 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
2033 
2034 	/* load bootstrap code into BSM memory */
2035 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
2036 	    size / sizeof (uint32_t));
2037 
2038 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
2039 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
2040 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
2041 
2042 	/*
2043 	 * prepare to load initialize uCode
2044 	 */
2045 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2046 
2047 	/* wait while the adapter is busy loading the firmware */
2048 	for (n = 0; n < 1000; n++) {
2049 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
2050 		    BSM_WR_CTRL_REG_BIT_START))
2051 			break;
2052 		DELAY(10);
2053 	}
2054 	if (n == 1000) {
2055 		cmn_err(CE_WARN, "timeout transferring firmware\n");
2056 		err = ETIMEDOUT;
2057 		return (err);
2058 	}
2059 
2060 	/* for future power-save mode use */
2061 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2062 
2063 	iwk_mac_access_exit(sc);
2064 
2065 	return (err);
2066 }
2067 
2068 /*ARGSUSED*/
2069 static void
2070 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2071 {
2072 	ieee80211com_t *ic = &sc->sc_ic;
2073 	iwk_rx_ring_t *ring = &sc->sc_rxq;
2074 	iwk_rx_phy_res_t *stat;
2075 	ieee80211_node_t *in;
2076 	uint32_t *tail;
2077 	struct ieee80211_frame *wh;
2078 	mblk_t *mp;
2079 	uint16_t len, rssi, mrssi, agc;
2080 	int16_t t;
2081 	uint32_t ants, i;
2082 	struct iwk_rx_non_cfg_phy *phyinfo;
2083 
2084 	/* assuming not 11n here. cope with 11n in phase-II */
2085 	stat = (iwk_rx_phy_res_t *)(desc + 1);
2086 	if (stat->cfg_phy_cnt > 20) {
2087 		return;
2088 	}
2089 
2090 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
2091 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
2092 	mrssi = 0;
2093 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
2094 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
2095 	for (i = 0; i < 3; i++) {
2096 		if (ants & (1 << i))
2097 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
2098 	}
2099 	t = mrssi - agc - 44; /* t is the dBM value */
2100 	/*
2101 	 * convert dBm to percentage ???
2102 	 */
2103 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
2104 	    (75 * 75);
2105 	if (rssi > 100)
2106 		rssi = 100;
2107 	if (rssi < 1)
2108 		rssi = 1;
2109 	len = stat->byte_count;
2110 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
2111 
2112 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
2113 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2114 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2115 	    len, stat->rate.r.s.rate, stat->channel,
2116 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2117 	    stat->cfg_phy_cnt, LE_32(*tail)));
2118 
2119 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2120 		IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n"));
2121 		return;
2122 	}
2123 
2124 	/*
2125 	 * discard Rx frames with bad CRC
2126 	 */
2127 	if ((LE_32(*tail) &
2128 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2129 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2130 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2131 		    LE_32(*tail)));
2132 		sc->sc_rx_err++;
2133 		return;
2134 	}
2135 
2136 	wh = (struct ieee80211_frame *)
2137 	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
2138 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2139 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2140 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2141 		    sc->sc_assoc_id));
2142 	}
2143 #ifdef DEBUG
2144 	if (iwk_dbg_flags & IWK_DEBUG_RX)
2145 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2146 #endif
2147 	in = ieee80211_find_rxnode(ic, wh);
2148 	mp = allocb(len, BPRI_MED);
2149 	if (mp) {
2150 		(void) memcpy(mp->b_wptr, wh, len);
2151 		mp->b_wptr += len;
2152 
2153 		/* send the frame to the 802.11 layer */
2154 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2155 	} else {
2156 		sc->sc_rx_nobuf++;
2157 		IWK_DBG((IWK_DEBUG_RX,
2158 		    "iwk_rx_intr(): alloc rx buf failed\n"));
2159 	}
2160 	/* release node reference */
2161 	ieee80211_free_node(in);
2162 }
2163 
2164 /*ARGSUSED*/
2165 static void
2166 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2167 {
2168 	ieee80211com_t *ic = &sc->sc_ic;
2169 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2170 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2171 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2172 
2173 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2174 	    " retries=%d frame_count=%x nkill=%d "
2175 	    "rate=%x duration=%d status=%x\n",
2176 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2177 	    stat->bt_kill_count, stat->rate.r.s.rate,
2178 	    LE_32(stat->duration), LE_32(stat->status)));
2179 
2180 	amrr->txcnt++;
2181 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2182 	if (stat->ntries > 0) {
2183 		amrr->retrycnt++;
2184 		sc->sc_tx_retries++;
2185 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2186 		    sc->sc_tx_retries));
2187 	}
2188 
2189 	sc->sc_tx_timer = 0;
2190 
2191 	mutex_enter(&sc->sc_tx_lock);
2192 	ring->queued--;
2193 	if (ring->queued < 0)
2194 		ring->queued = 0;
2195 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2196 		sc->sc_need_reschedule = 0;
2197 		mutex_exit(&sc->sc_tx_lock);
2198 		mac_tx_update(ic->ic_mach);
2199 		mutex_enter(&sc->sc_tx_lock);
2200 	}
2201 	mutex_exit(&sc->sc_tx_lock);
2202 }
2203 
2204 static void
2205 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2206 {
2207 	if ((desc->hdr.qid & 7) != 4) {
2208 		return;
2209 	}
2210 	mutex_enter(&sc->sc_glock);
2211 	sc->sc_flags |= IWK_F_CMD_DONE;
2212 	cv_signal(&sc->sc_cmd_cv);
2213 	mutex_exit(&sc->sc_glock);
2214 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2215 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2216 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2217 	    desc->hdr.type));
2218 }
2219 
2220 static void
2221 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2222 {
2223 	uint32_t base, i;
2224 	struct iwk_alive_resp *ar =
2225 	    (struct iwk_alive_resp *)(desc + 1);
2226 
2227 	/* the microcontroller is ready */
2228 	IWK_DBG((IWK_DEBUG_FW,
2229 	    "microcode alive notification minor: %x major: %x type:"
2230 	    " %x subtype: %x\n",
2231 	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2232 
2233 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2234 		IWK_DBG((IWK_DEBUG_FW,
2235 		    "microcontroller initialization failed\n"));
2236 	}
2237 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2238 		IWK_DBG((IWK_DEBUG_FW,
2239 		    "initialization alive received.\n"));
2240 		(void) memcpy(&sc->sc_card_alive_init, ar,
2241 		    sizeof (struct iwk_init_alive_resp));
2242 		/* XXX get temperature */
2243 		iwk_mac_access_enter(sc);
2244 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2245 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2246 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2247 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2248 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2249 		    sc->sc_dma_fw_data.cookie.dmac_size);
2250 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2251 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2252 		iwk_mac_access_exit(sc);
2253 	} else {
2254 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2255 		(void) memcpy(&sc->sc_card_alive_run, ar,
2256 		    sizeof (struct iwk_alive_resp));
2257 
2258 		/*
2259 		 * Init SCD related registers to make Tx work. XXX
2260 		 */
2261 		iwk_mac_access_enter(sc);
2262 
2263 		/* read sram address of data base */
2264 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2265 
2266 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2267 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2268 		    i < 128; i += 4)
2269 			iwk_mem_write(sc, base + i, 0);
2270 
2271 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2272 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2273 		    i < 256; i += 4)
2274 			iwk_mem_write(sc, base + i, 0);
2275 
2276 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2277 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2278 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2279 			iwk_mem_write(sc, base + i, 0);
2280 
2281 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2282 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2283 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2284 
2285 		/* initiate the tx queues */
2286 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
2287 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2288 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2289 			iwk_mem_write(sc, sc->sc_scd_base +
2290 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2291 			    (SCD_WIN_SIZE & 0x7f));
2292 			iwk_mem_write(sc, sc->sc_scd_base +
2293 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2294 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2295 		}
2296 		/* interrupt enable on each queue0-7 */
2297 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2298 		    (1 << IWK_NUM_QUEUES) - 1);
2299 		/* enable  each channel 0-7 */
2300 		iwk_reg_write(sc, SCD_TXFACT,
2301 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2302 		/*
2303 		 * queue 0-7 maps to FIFO 0-7 and
2304 		 * all queues work under FIFO mode (none-scheduler-ack)
2305 		 */
2306 		for (i = 0; i < 7; i++) {
2307 			iwk_reg_write(sc,
2308 			    SCD_QUEUE_STATUS_BITS(i),
2309 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2310 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2311 			    SCD_QUEUE_STTS_REG_MSK);
2312 		}
2313 		iwk_mac_access_exit(sc);
2314 
2315 		sc->sc_flags |= IWK_F_FW_INIT;
2316 		cv_signal(&sc->sc_fw_cv);
2317 	}
2318 
2319 }
2320 
2321 static uint_t
2322 /* LINTED: argument unused in function: unused */
2323 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2324 {
2325 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2326 	ieee80211com_t *ic = &sc->sc_ic;
2327 	iwk_rx_desc_t *desc;
2328 	iwk_rx_data_t *data;
2329 	uint32_t index;
2330 
2331 	mutex_enter(&sc->sc_glock);
2332 	if (sc->sc_rx_softint_pending != 1) {
2333 		mutex_exit(&sc->sc_glock);
2334 		return (DDI_INTR_UNCLAIMED);
2335 	}
2336 	/* disable interrupts */
2337 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2338 	mutex_exit(&sc->sc_glock);
2339 
2340 	/*
2341 	 * firmware has moved the index of the rx queue, driver get it,
2342 	 * and deal with it.
2343 	 */
2344 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2345 
2346 	while (sc->sc_rxq.cur != index) {
2347 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2348 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2349 
2350 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2351 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2352 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2353 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2354 
2355 		/* a command other than a tx need to be replied */
2356 		if (!(desc->hdr.qid & 0x80) &&
2357 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2358 		    (desc->hdr.type != REPLY_TX) &&
2359 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2360 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2361 		    (desc->hdr.type != SENSITIVITY_CMD))
2362 			iwk_cmd_intr(sc, desc);
2363 
2364 		switch (desc->hdr.type) {
2365 		case REPLY_4965_RX:
2366 			iwk_rx_intr(sc, desc, data);
2367 			break;
2368 
2369 		case REPLY_TX:
2370 			iwk_tx_intr(sc, desc, data);
2371 			break;
2372 
2373 		case REPLY_ALIVE:
2374 			iwk_ucode_alive(sc, desc);
2375 			break;
2376 
2377 		case CARD_STATE_NOTIFICATION:
2378 		{
2379 			uint32_t *status = (uint32_t *)(desc + 1);
2380 
2381 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2382 			    LE_32(*status)));
2383 
2384 			if (LE_32(*status) & 1) {
2385 				/*
2386 				 * the radio button has to be pushed(OFF). It
2387 				 * is considered as a hw error, the
2388 				 * iwk_thread() tries to recover it after the
2389 				 * button is pushed again(ON)
2390 				 */
2391 				cmn_err(CE_NOTE,
2392 				    "iwk_rx_softintr(): "
2393 				    "Radio transmitter is off\n");
2394 				sc->sc_ostate = sc->sc_ic.ic_state;
2395 				ieee80211_new_state(&sc->sc_ic,
2396 				    IEEE80211_S_INIT, -1);
2397 				sc->sc_flags |=
2398 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2399 			}
2400 			break;
2401 		}
2402 		case SCAN_START_NOTIFICATION:
2403 		{
2404 			iwk_start_scan_t *scan =
2405 			    (iwk_start_scan_t *)(desc + 1);
2406 
2407 			IWK_DBG((IWK_DEBUG_SCAN,
2408 			    "scanning channel %d status %x\n",
2409 			    scan->chan, LE_32(scan->status)));
2410 
2411 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2412 			break;
2413 		}
2414 		case SCAN_COMPLETE_NOTIFICATION:
2415 		{
2416 			iwk_stop_scan_t *scan =
2417 			    (iwk_stop_scan_t *)(desc + 1);
2418 
2419 			IWK_DBG((IWK_DEBUG_SCAN,
2420 			    "completed channel %d (burst of %d) status %02x\n",
2421 			    scan->chan, scan->nchan, scan->status));
2422 
2423 			sc->sc_scan_pending++;
2424 			break;
2425 		}
2426 		case STATISTICS_NOTIFICATION:
2427 			/* handle statistics notification */
2428 			iwk_statistics_notify(sc, desc);
2429 			break;
2430 		}
2431 
2432 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2433 	}
2434 
2435 	/*
2436 	 * driver dealt with what reveived in rx queue and tell the information
2437 	 * to the firmware.
2438 	 */
2439 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2440 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2441 
2442 	mutex_enter(&sc->sc_glock);
2443 	/* re-enable interrupts */
2444 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2445 	sc->sc_rx_softint_pending = 0;
2446 	mutex_exit(&sc->sc_glock);
2447 
2448 	return (DDI_INTR_CLAIMED);
2449 }
2450 
2451 static uint_t
2452 /* LINTED: argument unused in function: unused */
2453 iwk_intr(caddr_t arg, caddr_t unused)
2454 {
2455 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2456 	uint32_t r, rfh;
2457 
2458 	mutex_enter(&sc->sc_glock);
2459 
2460 	if (sc->sc_flags & IWK_F_SUSPEND) {
2461 		mutex_exit(&sc->sc_glock);
2462 		return (DDI_INTR_UNCLAIMED);
2463 	}
2464 
2465 	r = IWK_READ(sc, CSR_INT);
2466 	if (r == 0 || r == 0xffffffff) {
2467 		mutex_exit(&sc->sc_glock);
2468 		return (DDI_INTR_UNCLAIMED);
2469 	}
2470 
2471 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2472 
2473 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2474 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2475 	/* disable interrupts */
2476 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2477 	/* ack interrupts */
2478 	IWK_WRITE(sc, CSR_INT, r);
2479 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2480 
2481 	if (sc->sc_soft_hdl == NULL) {
2482 		mutex_exit(&sc->sc_glock);
2483 		return (DDI_INTR_CLAIMED);
2484 	}
2485 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2486 		cmn_err(CE_WARN, "fatal firmware error\n");
2487 		mutex_exit(&sc->sc_glock);
2488 #ifdef DEBUG
2489 		/* dump event and error logs to dmesg */
2490 		iwk_write_error_log(sc);
2491 		iwk_write_event_log(sc);
2492 #endif /* DEBUG */
2493 		iwk_stop(sc);
2494 		sc->sc_ostate = sc->sc_ic.ic_state;
2495 
2496 		/* not capable of fast recovery */
2497 		if (!IWK_CHK_FAST_RECOVER(sc))
2498 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2499 
2500 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2501 		return (DDI_INTR_CLAIMED);
2502 	}
2503 
2504 	if (r & BIT_INT_RF_KILL) {
2505 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2506 	}
2507 
2508 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2509 	    (rfh & FH_INT_RX_MASK)) {
2510 		sc->sc_rx_softint_pending = 1;
2511 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2512 	}
2513 
2514 	if (r & BIT_INT_ALIVE)	{
2515 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2516 	}
2517 
2518 	/* re-enable interrupts */
2519 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2520 	mutex_exit(&sc->sc_glock);
2521 
2522 	return (DDI_INTR_CLAIMED);
2523 }
2524 
2525 static uint8_t
2526 iwk_rate_to_plcp(int rate)
2527 {
2528 	uint8_t ret;
2529 
2530 	switch (rate) {
2531 	/* CCK rates */
2532 	case 2:
2533 		ret = 0xa;
2534 		break;
2535 	case 4:
2536 		ret = 0x14;
2537 		break;
2538 	case 11:
2539 		ret = 0x37;
2540 		break;
2541 	case 22:
2542 		ret = 0x6e;
2543 		break;
2544 	/* OFDM rates */
2545 	case 12:
2546 		ret = 0xd;
2547 		break;
2548 	case 18:
2549 		ret = 0xf;
2550 		break;
2551 	case 24:
2552 		ret = 0x5;
2553 		break;
2554 	case 36:
2555 		ret = 0x7;
2556 		break;
2557 	case 48:
2558 		ret = 0x9;
2559 		break;
2560 	case 72:
2561 		ret = 0xb;
2562 		break;
2563 	case 96:
2564 		ret = 0x1;
2565 		break;
2566 	case 108:
2567 		ret = 0x3;
2568 		break;
2569 	default:
2570 		ret = 0;
2571 		break;
2572 	}
2573 	return (ret);
2574 }
2575 
2576 static mblk_t *
2577 iwk_m_tx(void *arg, mblk_t *mp)
2578 {
2579 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2580 	ieee80211com_t	*ic = &sc->sc_ic;
2581 	mblk_t			*next;
2582 
2583 	if (sc->sc_flags & IWK_F_SUSPEND) {
2584 		freemsgchain(mp);
2585 		return (NULL);
2586 	}
2587 
2588 	if (ic->ic_state != IEEE80211_S_RUN) {
2589 		freemsgchain(mp);
2590 		return (NULL);
2591 	}
2592 
2593 	if ((sc->sc_flags & IWK_F_HW_ERR_RECOVER) &&
2594 	    IWK_CHK_FAST_RECOVER(sc)) {
2595 		IWK_DBG((IWK_DEBUG_FW, "iwk_m_tx(): hold queue\n"));
2596 		return (mp);
2597 	}
2598 
2599 	while (mp != NULL) {
2600 		next = mp->b_next;
2601 		mp->b_next = NULL;
2602 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2603 			mp->b_next = next;
2604 			break;
2605 		}
2606 		mp = next;
2607 	}
2608 	return (mp);
2609 }
2610 
2611 /* ARGSUSED */
2612 static int
2613 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2614 {
2615 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2616 	iwk_tx_ring_t *ring;
2617 	iwk_tx_desc_t *desc;
2618 	iwk_tx_data_t *data;
2619 	iwk_cmd_t *cmd;
2620 	iwk_tx_cmd_t *tx;
2621 	ieee80211_node_t *in;
2622 	struct ieee80211_frame *wh;
2623 	struct ieee80211_key *k = NULL;
2624 	mblk_t *m, *m0;
2625 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2626 	uint16_t masks = 0;
2627 	uint8_t index, index1, index2;
2628 
2629 	ring = &sc->sc_txq[0];
2630 	data = &ring->data[ring->cur];
2631 	desc = data->desc;
2632 	cmd = data->cmd;
2633 	bzero(desc, sizeof (*desc));
2634 	bzero(cmd, sizeof (*cmd));
2635 
2636 	mutex_enter(&sc->sc_tx_lock);
2637 	if (sc->sc_flags & IWK_F_SUSPEND) {
2638 		mutex_exit(&sc->sc_tx_lock);
2639 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2640 		    IEEE80211_FC0_TYPE_DATA) {
2641 			freemsg(mp);
2642 		}
2643 		err = IWK_FAIL;
2644 		goto exit;
2645 	}
2646 
2647 	if (ring->queued > ring->count - 64) {
2648 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2649 		sc->sc_need_reschedule = 1;
2650 		mutex_exit(&sc->sc_tx_lock);
2651 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2652 		    IEEE80211_FC0_TYPE_DATA) {
2653 			freemsg(mp);
2654 		}
2655 		sc->sc_tx_nobuf++;
2656 		err = IWK_FAIL;
2657 		goto exit;
2658 	}
2659 	mutex_exit(&sc->sc_tx_lock);
2660 
2661 	hdrlen = sizeof (struct ieee80211_frame);
2662 
2663 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2664 	if (m == NULL) { /* can not alloc buf, drop this package */
2665 		cmn_err(CE_WARN,
2666 		    "iwk_send(): failed to allocate msgbuf\n");
2667 		freemsg(mp);
2668 		err = IWK_SUCCESS;
2669 		goto exit;
2670 	}
2671 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2672 		mblen = MBLKL(m0);
2673 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2674 		off += mblen;
2675 	}
2676 	m->b_wptr += off;
2677 	freemsg(mp);
2678 
2679 	wh = (struct ieee80211_frame *)m->b_rptr;
2680 
2681 	if (ic->ic_opmode == IEEE80211_M_IBSS &&
2682 	    (!(IEEE80211_IS_MULTICAST(wh->i_addr1)))) {
2683 		mutex_enter(&sc->sc_glock);
2684 		mutex_enter(&sc->sc_ibss.node_tb_lock);
2685 
2686 		/*
2687 		 * search for node in ibss node table
2688 		 */
2689 		for (index1 = IWK_STA_ID;
2690 		    index1 < IWK_STATION_COUNT; index1++) {
2691 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
2692 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
2693 			    ibss_node_tb[index1].node.bssid,
2694 			    wh->i_addr1)) {
2695 				break;
2696 			}
2697 		}
2698 
2699 		/*
2700 		 * if don't find in ibss node table
2701 		 */
2702 		if (index1 >= IWK_BROADCAST_ID) {
2703 			err = iwk_clean_add_node_ibss(ic,
2704 			    wh->i_addr1, &index2);
2705 			if (err != IWK_SUCCESS) {
2706 				cmn_err(CE_WARN, "iwk_send(): "
2707 				    "failed to clean all nodes "
2708 				    "and add one node\n");
2709 				mutex_exit(&sc->sc_ibss.node_tb_lock);
2710 				mutex_exit(&sc->sc_glock);
2711 				freemsg(m);
2712 				sc->sc_tx_err++;
2713 				err = IWK_SUCCESS;
2714 				goto exit;
2715 			}
2716 			index = index2;
2717 		} else {
2718 			index = index1;
2719 		}
2720 		mutex_exit(&sc->sc_ibss.node_tb_lock);
2721 		mutex_exit(&sc->sc_glock);
2722 	}
2723 
2724 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2725 	if (in == NULL) {
2726 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2727 		freemsg(m);
2728 		sc->sc_tx_err++;
2729 		err = IWK_SUCCESS;
2730 		goto exit;
2731 	}
2732 	(void) ieee80211_encap(ic, m, in);
2733 
2734 	cmd->hdr.type = REPLY_TX;
2735 	cmd->hdr.flags = 0;
2736 	cmd->hdr.qid = ring->qid;
2737 	cmd->hdr.idx = ring->cur;
2738 
2739 	tx = (iwk_tx_cmd_t *)cmd->data;
2740 	tx->tx_flags = 0;
2741 
2742 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2743 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2744 	} else {
2745 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2746 	}
2747 
2748 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2749 		k = ieee80211_crypto_encap(ic, m);
2750 		if (k == NULL) {
2751 			freemsg(m);
2752 			sc->sc_tx_err++;
2753 			err = IWK_SUCCESS;
2754 			goto exit;
2755 		}
2756 
2757 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2758 			tx->sec_ctl = 2; /* for CCMP */
2759 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2760 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2761 		}
2762 
2763 		/* packet header may have moved, reset our local pointer */
2764 		wh = (struct ieee80211_frame *)m->b_rptr;
2765 	}
2766 
2767 	len = msgdsize(m);
2768 
2769 #ifdef DEBUG
2770 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2771 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2772 #endif
2773 
2774 	/* pickup a rate */
2775 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2776 	    IEEE80211_FC0_TYPE_MGT) {
2777 		/* mgmt frames are sent at 1M */
2778 		rate = in->in_rates.ir_rates[0];
2779 	} else {
2780 		/*
2781 		 * do it here for the software way rate control.
2782 		 * later for rate scaling in hardware.
2783 		 * maybe like the following, for management frame:
2784 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2785 		 * for data frame:
2786 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2787 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2788 		 * tx->initial_rate_index = 1;
2789 		 *
2790 		 * now the txrate is determined in tx cmd flags, set to the
2791 		 * max value 54M for 11g and 11M for 11b.
2792 		 */
2793 
2794 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2795 			rate = ic->ic_fixed_rate;
2796 		} else {
2797 			rate = in->in_rates.ir_rates[in->in_txrate];
2798 		}
2799 	}
2800 	rate &= IEEE80211_RATE_VAL;
2801 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2802 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2803 
2804 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2805 
2806 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2807 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2808 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2809 
2810 	/* retrieve destination node's id */
2811 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2812 		tx->sta_id = IWK_BROADCAST_ID;
2813 	} else {
2814 		if (ic->ic_opmode == IEEE80211_M_IBSS)
2815 			tx->sta_id = index;
2816 		else
2817 			tx->sta_id = IWK_AP_ID;
2818 	}
2819 
2820 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2821 	    IEEE80211_FC0_TYPE_MGT) {
2822 		/* tell h/w to set timestamp in probe responses */
2823 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2824 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2825 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2826 
2827 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2828 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2829 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2830 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2831 			tx->timeout.pm_frame_timeout = 3;
2832 		else
2833 			tx->timeout.pm_frame_timeout = 2;
2834 	} else
2835 		tx->timeout.pm_frame_timeout = 0;
2836 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2837 		masks |= RATE_MCS_CCK_MSK;
2838 
2839 	masks |= RATE_MCS_ANT_B_MSK;
2840 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2841 
2842 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2843 	    tx->tx_flags));
2844 
2845 	tx->rts_retry_limit = 60;
2846 	tx->data_retry_limit = 15;
2847 
2848 	tx->stop_time.life_time  = LE_32(0xffffffff);
2849 
2850 	tx->len = LE_16(len);
2851 
2852 	tx->dram_lsb_ptr =
2853 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2854 	tx->dram_msb_ptr = 0;
2855 	tx->driver_txop = 0;
2856 	tx->next_frame_len = 0;
2857 
2858 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2859 	m->b_rptr += hdrlen;
2860 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2861 
2862 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2863 	    ring->qid, ring->cur, len));
2864 
2865 	/*
2866 	 * first segment includes the tx cmd plus the 802.11 header,
2867 	 * the second includes the remaining of the 802.11 frame.
2868 	 */
2869 	desc->val0 = LE_32(2 << 24);
2870 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2871 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2872 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2873 	desc->pa[0].val2 =
2874 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2875 	    ((len - hdrlen) << 20);
2876 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2877 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2878 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2879 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2880 
2881 	mutex_enter(&sc->sc_tx_lock);
2882 	ring->queued++;
2883 	mutex_exit(&sc->sc_tx_lock);
2884 
2885 	/* kick ring */
2886 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2887 	    tfd_offset[ring->cur].val = 8 + len;
2888 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2889 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2890 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2891 	}
2892 
2893 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2894 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2895 
2896 	ring->cur = (ring->cur + 1) % ring->count;
2897 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2898 	freemsg(m);
2899 	/* release node reference */
2900 	ieee80211_free_node(in);
2901 
2902 	ic->ic_stats.is_tx_bytes += len;
2903 	ic->ic_stats.is_tx_frags++;
2904 
2905 	if (sc->sc_tx_timer == 0)
2906 		sc->sc_tx_timer = 4;
2907 
2908 exit:
2909 	return (err);
2910 }
2911 
2912 static void
2913 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2914 {
2915 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2916 	ieee80211com_t	*ic = &sc->sc_ic;
2917 
2918 	enum ieee80211_opmode		oldmod;
2919 	iwk_tx_power_table_cmd_t	txpower;
2920 	iwk_add_sta_t			node;
2921 	iwk_link_quality_cmd_t		link_quality;
2922 	uint16_t			masks = 0;
2923 	int				i, err, err1;
2924 
2925 	oldmod = ic->ic_opmode;
2926 
2927 	err = ieee80211_ioctl(ic, wq, mp);
2928 
2929 	/*
2930 	 * return to STA mode
2931 	 */
2932 	if ((0 == err || ENETRESET == err) && (oldmod != ic->ic_opmode) &&
2933 	    (ic->ic_opmode == IEEE80211_M_STA)) {
2934 		/* configure rxon */
2935 		(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
2936 		IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
2937 		IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
2938 		sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
2939 		sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
2940 		    RXON_FLG_AUTO_DETECT_MSK |
2941 		    RXON_FLG_BAND_24G_MSK);
2942 		sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
2943 		switch (ic->ic_opmode) {
2944 		case IEEE80211_M_STA:
2945 			sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
2946 			sc->sc_config.filter_flags |=
2947 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2948 			    RXON_FILTER_DIS_DECRYPT_MSK |
2949 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2950 			break;
2951 		case IEEE80211_M_IBSS:
2952 		case IEEE80211_M_AHDEMO:
2953 			sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
2954 			sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2955 			sc->sc_config.filter_flags =
2956 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2957 			    RXON_FILTER_DIS_DECRYPT_MSK |
2958 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2959 			break;
2960 		case IEEE80211_M_HOSTAP:
2961 			sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
2962 			break;
2963 		case IEEE80211_M_MONITOR:
2964 			sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
2965 			sc->sc_config.filter_flags |=
2966 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2967 			    RXON_FILTER_CTL2HOST_MSK |
2968 			    RXON_FILTER_PROMISC_MSK);
2969 			break;
2970 		}
2971 		sc->sc_config.cck_basic_rates  = 0x0f;
2972 		sc->sc_config.ofdm_basic_rates = 0xff;
2973 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
2974 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
2975 		/* set antenna */
2976 		mutex_enter(&sc->sc_glock);
2977 		sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
2978 		    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
2979 		    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
2980 		    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
2981 		err1 = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
2982 		    sizeof (iwk_rxon_cmd_t), 1);
2983 		if (err1 != IWK_SUCCESS) {
2984 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
2985 			    "failed to set configure command"
2986 			    " please run (ifconfig unplumb and"
2987 			    " ifconfig plumb)\n");
2988 		}
2989 		/*
2990 		 * set Tx power for 2.4GHz channels
2991 		 * (need further investigation. fix tx power at present)
2992 		 */
2993 		(void) memset(&txpower, 0, sizeof (txpower));
2994 		txpower.band = 1; /* for 2.4G */
2995 		txpower.channel = sc->sc_config.chan;
2996 		txpower.channel_normal_width = 0;
2997 		for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
2998 			txpower.tx_power.ht_ofdm_power[i].
2999 			    s.ramon_tx_gain = 0x3f3f;
3000 			txpower.tx_power.ht_ofdm_power[i].
3001 			    s.dsp_predis_atten = 110 | (110 << 8);
3002 		}
3003 		txpower.tx_power.legacy_cck_power.s.ramon_tx_gain = 0x3f3f;
3004 		txpower.tx_power.legacy_cck_power.s.dsp_predis_atten
3005 		    = 110 | (110 << 8);
3006 		err1 = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
3007 		    sizeof (txpower), 1);
3008 		if (err1 != IWK_SUCCESS) {
3009 			cmn_err(CE_WARN, "iwk_m_ioctl(): failed to set txpower"
3010 			    " please run (ifconfig unplumb "
3011 			    "and ifconfig plumb)\n");
3012 		}
3013 		/* add broadcast node so that we can send broadcast frame */
3014 		(void) memset(&node, 0, sizeof (node));
3015 		(void) memset(node.bssid, 0xff, 6);
3016 		node.id = IWK_BROADCAST_ID;
3017 		err1 = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3018 		if (err1 != IWK_SUCCESS) {
3019 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
3020 			    "failed to add broadcast node\n");
3021 		}
3022 
3023 		/* TX_LINK_QUALITY cmd */
3024 		(void) memset(&link_quality, 0, sizeof (link_quality));
3025 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3026 			masks |= RATE_MCS_CCK_MSK;
3027 			masks |= RATE_MCS_ANT_B_MSK;
3028 			masks &= ~RATE_MCS_ANT_A_MSK;
3029 			link_quality.rate_n_flags[i] =
3030 			    iwk_rate_to_plcp(2) | masks;
3031 		}
3032 		link_quality.general_params.single_stream_ant_msk = 2;
3033 		link_quality.general_params.dual_stream_ant_msk = 3;
3034 		link_quality.agg_params.agg_dis_start_th = 3;
3035 		link_quality.agg_params.agg_time_limit = LE_16(4000);
3036 		link_quality.sta_id = IWK_BROADCAST_ID;
3037 		err1 = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3038 		    sizeof (link_quality), 1);
3039 		if (err1 != IWK_SUCCESS) {
3040 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
3041 			    "failed to config link quality table\n");
3042 		}
3043 		mutex_exit(&sc->sc_glock);
3044 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3045 	}
3046 
3047 	if (err == ENETRESET) {
3048 		/*
3049 		 * This is special for the hidden AP connection.
3050 		 * In any case, we should make sure only one 'scan'
3051 		 * in the driver for a 'connect' CLI command. So
3052 		 * when connecting to a hidden AP, the scan is just
3053 		 * sent out to the air when we know the desired
3054 		 * essid of the AP we want to connect.
3055 		 */
3056 		if (ic->ic_des_esslen) {
3057 			if (sc->sc_flags & IWK_F_RUNNING) {
3058 				iwk_m_stop(sc);
3059 				(void) iwk_m_start(sc);
3060 				(void) ieee80211_new_state(ic,
3061 				    IEEE80211_S_SCAN, -1);
3062 			}
3063 		}
3064 	}
3065 }
3066 
3067 /*
3068  * callback functions for set/get properties
3069  */
3070 /* ARGSUSED */
3071 static int
3072 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3073     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
3074 {
3075 	int		err = 0;
3076 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3077 
3078 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3079 	    pr_flags, wldp_length, wldp_buf, perm);
3080 
3081 	return (err);
3082 }
3083 static int
3084 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3085     uint_t wldp_length, const void *wldp_buf)
3086 {
3087 	int		err;
3088 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3089 	ieee80211com_t	*ic = &sc->sc_ic;
3090 
3091 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3092 	    wldp_buf);
3093 
3094 	if (err == ENETRESET) {
3095 		if (ic->ic_des_esslen) {
3096 			if (sc->sc_flags & IWK_F_RUNNING) {
3097 				iwk_m_stop(sc);
3098 				(void) iwk_m_start(sc);
3099 				(void) ieee80211_new_state(ic,
3100 				    IEEE80211_S_SCAN, -1);
3101 			}
3102 		}
3103 		err = 0;
3104 	}
3105 
3106 	return (err);
3107 }
3108 
3109 /*ARGSUSED*/
3110 static int
3111 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
3112 {
3113 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
3114 	ieee80211com_t	*ic = &sc->sc_ic;
3115 	ieee80211_node_t *in;
3116 
3117 	mutex_enter(&sc->sc_glock);
3118 	switch (stat) {
3119 	case MAC_STAT_IFSPEED:
3120 		in = ic->ic_bss;
3121 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
3122 		    IEEE80211_RATE(in->in_txrate) :
3123 		    ic->ic_fixed_rate) / 2 * 1000000;
3124 		break;
3125 	case MAC_STAT_NOXMTBUF:
3126 		*val = sc->sc_tx_nobuf;
3127 		break;
3128 	case MAC_STAT_NORCVBUF:
3129 		*val = sc->sc_rx_nobuf;
3130 		break;
3131 	case MAC_STAT_IERRORS:
3132 		*val = sc->sc_rx_err;
3133 		break;
3134 	case MAC_STAT_RBYTES:
3135 		*val = ic->ic_stats.is_rx_bytes;
3136 		break;
3137 	case MAC_STAT_IPACKETS:
3138 		*val = ic->ic_stats.is_rx_frags;
3139 		break;
3140 	case MAC_STAT_OBYTES:
3141 		*val = ic->ic_stats.is_tx_bytes;
3142 		break;
3143 	case MAC_STAT_OPACKETS:
3144 		*val = ic->ic_stats.is_tx_frags;
3145 		break;
3146 	case MAC_STAT_OERRORS:
3147 	case WIFI_STAT_TX_FAILED:
3148 		*val = sc->sc_tx_err;
3149 		break;
3150 	case WIFI_STAT_TX_RETRANS:
3151 		*val = sc->sc_tx_retries;
3152 		break;
3153 	case WIFI_STAT_FCS_ERRORS:
3154 	case WIFI_STAT_WEP_ERRORS:
3155 	case WIFI_STAT_TX_FRAGS:
3156 	case WIFI_STAT_MCAST_TX:
3157 	case WIFI_STAT_RTS_SUCCESS:
3158 	case WIFI_STAT_RTS_FAILURE:
3159 	case WIFI_STAT_ACK_FAILURE:
3160 	case WIFI_STAT_RX_FRAGS:
3161 	case WIFI_STAT_MCAST_RX:
3162 	case WIFI_STAT_RX_DUPS:
3163 		mutex_exit(&sc->sc_glock);
3164 		return (ieee80211_stat(ic, stat, val));
3165 	default:
3166 		mutex_exit(&sc->sc_glock);
3167 		return (ENOTSUP);
3168 	}
3169 	mutex_exit(&sc->sc_glock);
3170 
3171 	return (IWK_SUCCESS);
3172 
3173 }
3174 
3175 static int
3176 iwk_m_start(void *arg)
3177 {
3178 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3179 	ieee80211com_t	*ic = &sc->sc_ic;
3180 	int err;
3181 
3182 	err = iwk_init(sc);
3183 
3184 	if (err != IWK_SUCCESS) {
3185 		/*
3186 		 * The hw init err(eg. RF is OFF). Return Success to make
3187 		 * the 'plumb' succeed. The iwk_thread() tries to re-init
3188 		 * background.
3189 		 */
3190 		cmn_err(CE_WARN, "iwk_m_start(): failed to initialize "
3191 		    "hardware\n");
3192 		mutex_enter(&sc->sc_glock);
3193 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3194 		mutex_exit(&sc->sc_glock);
3195 		return (IWK_SUCCESS);
3196 	}
3197 
3198 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3199 
3200 	mutex_enter(&sc->sc_glock);
3201 	sc->sc_flags |= IWK_F_RUNNING;
3202 	mutex_exit(&sc->sc_glock);
3203 
3204 	return (IWK_SUCCESS);
3205 }
3206 
3207 static void
3208 iwk_m_stop(void *arg)
3209 {
3210 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3211 	ieee80211com_t	*ic = &sc->sc_ic;
3212 
3213 	iwk_stop(sc);
3214 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3215 	ieee80211_stop_watchdog(ic);
3216 	mutex_enter(&sc->sc_mt_lock);
3217 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3218 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
3219 	mutex_exit(&sc->sc_mt_lock);
3220 	mutex_enter(&sc->sc_glock);
3221 	sc->sc_flags &= ~IWK_F_RUNNING;
3222 	mutex_exit(&sc->sc_glock);
3223 }
3224 
3225 /*ARGSUSED*/
3226 static int
3227 iwk_m_unicst(void *arg, const uint8_t *macaddr)
3228 {
3229 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3230 	ieee80211com_t	*ic = &sc->sc_ic;
3231 	int err;
3232 
3233 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3234 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3235 		mutex_enter(&sc->sc_glock);
3236 		err = iwk_config(sc);
3237 		mutex_exit(&sc->sc_glock);
3238 		if (err != IWK_SUCCESS) {
3239 			cmn_err(CE_WARN,
3240 			    "iwk_m_unicst(): "
3241 			    "failed to configure device\n");
3242 			goto fail;
3243 		}
3244 	}
3245 	return (IWK_SUCCESS);
3246 fail:
3247 	return (err);
3248 }
3249 
3250 /*ARGSUSED*/
3251 static int
3252 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3253 {
3254 	return (IWK_SUCCESS);
3255 }
3256 
3257 /*ARGSUSED*/
3258 static int
3259 iwk_m_promisc(void *arg, boolean_t on)
3260 {
3261 	return (IWK_SUCCESS);
3262 }
3263 
3264 static void
3265 iwk_thread(iwk_sc_t *sc)
3266 {
3267 	ieee80211com_t	*ic = &sc->sc_ic;
3268 	clock_t clk;
3269 	int times = 0, err, n = 0, timeout = 0;
3270 	uint32_t tmp;
3271 
3272 	mutex_enter(&sc->sc_mt_lock);
3273 	while (sc->sc_mf_thread_switch) {
3274 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
3275 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3276 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
3277 		} else {
3278 			sc->sc_flags |= IWK_F_RADIO_OFF;
3279 		}
3280 		/*
3281 		 * If in SUSPEND or the RF is OFF, do nothing
3282 		 */
3283 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
3284 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
3285 			mutex_exit(&sc->sc_mt_lock);
3286 			delay(drv_usectohz(100000));
3287 			mutex_enter(&sc->sc_mt_lock);
3288 			continue;
3289 		}
3290 
3291 		/*
3292 		 * recovery fatal error
3293 		 */
3294 		if (ic->ic_mach &&
3295 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
3296 
3297 			IWK_DBG((IWK_DEBUG_FW,
3298 			    "iwk_thread(): "
3299 			    "try to recover fatal hw error: %d\n", times++));
3300 
3301 			iwk_stop(sc);
3302 
3303 			if (IWK_CHK_FAST_RECOVER(sc)) {
3304 				/* save runtime configuration */
3305 				bcopy(&sc->sc_config, &sc->sc_config_save,
3306 				    sizeof (sc->sc_config));
3307 			} else {
3308 				mutex_exit(&sc->sc_mt_lock);
3309 				ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3310 				delay(drv_usectohz(2000000 + n*500000));
3311 				mutex_enter(&sc->sc_mt_lock);
3312 			}
3313 
3314 			err = iwk_init(sc);
3315 			if (err != IWK_SUCCESS) {
3316 				n++;
3317 				if (n < 20)
3318 					continue;
3319 			}
3320 			n = 0;
3321 			if (!err)
3322 				sc->sc_flags |= IWK_F_RUNNING;
3323 
3324 			if (!IWK_CHK_FAST_RECOVER(sc) ||
3325 			    iwk_fast_recover(sc) != IWK_SUCCESS) {
3326 				sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3327 
3328 				mutex_exit(&sc->sc_mt_lock);
3329 				delay(drv_usectohz(2000000));
3330 				if (sc->sc_ostate != IEEE80211_S_INIT)
3331 					ieee80211_new_state(ic,
3332 					    IEEE80211_S_SCAN, 0);
3333 				mutex_enter(&sc->sc_mt_lock);
3334 			}
3335 		}
3336 
3337 		if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) {
3338 			IWK_DBG((IWK_DEBUG_RESUME,
3339 			    "iwk_thread(): lazy resume\n"));
3340 
3341 			sc->sc_flags &= ~IWK_F_LAZY_RESUME;
3342 			mutex_exit(&sc->sc_mt_lock);
3343 			/*
3344 			 * NB: under WPA mode, this call hangs (door problem?)
3345 			 * when called in iwk_attach() and iwk_detach() while
3346 			 * system is in the procedure of CPR. To be safe, let
3347 			 * the thread do this.
3348 			 */
3349 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3350 			mutex_enter(&sc->sc_mt_lock);
3351 		}
3352 
3353 		if (ic->ic_mach &&
3354 		    (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3355 			IWK_DBG((IWK_DEBUG_SCAN,
3356 			    "iwk_thread(): "
3357 			    "wait for probe response\n"));
3358 			sc->sc_scan_pending--;
3359 			mutex_exit(&sc->sc_mt_lock);
3360 			delay(drv_usectohz(200000));
3361 			if (sc->sc_flags & IWK_F_SCANNING)
3362 				ieee80211_next_scan(ic);
3363 			mutex_enter(&sc->sc_mt_lock);
3364 		}
3365 
3366 		/*
3367 		 * rate ctl
3368 		 */
3369 		if (ic->ic_mach &&
3370 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3371 			clk = ddi_get_lbolt();
3372 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
3373 				iwk_amrr_timeout(sc);
3374 			}
3375 		}
3376 
3377 		mutex_exit(&sc->sc_mt_lock);
3378 		delay(drv_usectohz(100000));
3379 		mutex_enter(&sc->sc_mt_lock);
3380 
3381 		if (sc->sc_tx_timer) {
3382 			timeout++;
3383 			if (timeout == 10) {
3384 				sc->sc_tx_timer--;
3385 				if (sc->sc_tx_timer == 0) {
3386 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3387 					sc->sc_ostate = IEEE80211_S_RUN;
3388 					IWK_DBG((IWK_DEBUG_FW,
3389 					    "iwk_thread(): try to recover from"
3390 					    " 'send fail\n"));
3391 				}
3392 				timeout = 0;
3393 			}
3394 		}
3395 
3396 	}
3397 	sc->sc_mf_thread = NULL;
3398 	cv_signal(&sc->sc_mt_cv);
3399 	mutex_exit(&sc->sc_mt_lock);
3400 }
3401 
3402 
3403 /*
3404  * Send a command to the firmware.
3405  */
3406 static int
3407 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3408 {
3409 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3410 	iwk_tx_desc_t *desc;
3411 	iwk_cmd_t *cmd;
3412 	clock_t clk;
3413 
3414 	ASSERT(size <= sizeof (cmd->data));
3415 	ASSERT(mutex_owned(&sc->sc_glock));
3416 
3417 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3418 	desc = ring->data[ring->cur].desc;
3419 	cmd = ring->data[ring->cur].cmd;
3420 
3421 	cmd->hdr.type = (uint8_t)code;
3422 	cmd->hdr.flags = 0;
3423 	cmd->hdr.qid = ring->qid;
3424 	cmd->hdr.idx = ring->cur;
3425 	(void) memcpy(cmd->data, buf, size);
3426 	(void) memset(desc, 0, sizeof (*desc));
3427 
3428 	desc->val0 = LE_32(1 << 24);
3429 	desc->pa[0].tb1_addr =
3430 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3431 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3432 
3433 	/* kick cmd ring XXX */
3434 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3435 	    tfd_offset[ring->cur].val = 8;
3436 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3437 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3438 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3439 	}
3440 	ring->cur = (ring->cur + 1) % ring->count;
3441 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3442 
3443 	if (async)
3444 		return (IWK_SUCCESS);
3445 	else {
3446 		sc->sc_flags &= ~IWK_F_CMD_DONE;
3447 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3448 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3449 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3450 			    0)
3451 				break;
3452 		}
3453 		if (sc->sc_flags & IWK_F_CMD_DONE)
3454 			return (IWK_SUCCESS);
3455 		else
3456 			return (IWK_FAIL);
3457 	}
3458 }
3459 
3460 static void
3461 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3462 {
3463 	iwk_led_cmd_t led;
3464 
3465 	led.interval = LE_32(100000);	/* unit: 100ms */
3466 	led.id = id;
3467 	led.off = off;
3468 	led.on = on;
3469 
3470 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3471 }
3472 
3473 static int
3474 iwk_hw_set_before_auth(iwk_sc_t *sc)
3475 {
3476 	ieee80211com_t *ic = &sc->sc_ic;
3477 	ieee80211_node_t *in = ic->ic_bss;
3478 	iwk_add_sta_t node;
3479 	iwk_link_quality_cmd_t link_quality;
3480 	struct ieee80211_rateset rs;
3481 	uint16_t masks = 0, rate;
3482 	int i, err;
3483 
3484 	if (in->in_chan == IEEE80211_CHAN_ANYC) {
3485 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3486 		    "channel (%d) isn't in proper range\n",
3487 		    ieee80211_chan2ieee(ic, in->in_chan));
3488 		return (IWK_FAIL);
3489 	}
3490 
3491 	/* update adapter's configuration according the info of target AP */
3492 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3493 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
3494 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
3495 		sc->sc_config.cck_basic_rates  = 0x03;
3496 		sc->sc_config.ofdm_basic_rates = 0;
3497 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3498 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3499 		sc->sc_config.cck_basic_rates  = 0;
3500 		sc->sc_config.ofdm_basic_rates = 0x15;
3501 	} else { /* assume 802.11b/g */
3502 		sc->sc_config.cck_basic_rates  = 0x0f;
3503 		sc->sc_config.ofdm_basic_rates = 0xff;
3504 	}
3505 
3506 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3507 	    RXON_FLG_SHORT_SLOT_MSK);
3508 
3509 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3510 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3511 	else
3512 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3513 
3514 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3515 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3516 	else
3517 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3518 
3519 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3520 	    "filter_flags %x  cck %x ofdm %x"
3521 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3522 	    sc->sc_config.chan, sc->sc_config.flags,
3523 	    sc->sc_config.filter_flags,
3524 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3525 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3526 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3527 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3528 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3529 	    sizeof (iwk_rxon_cmd_t), 1);
3530 	if (err != IWK_SUCCESS) {
3531 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3532 		    " failed to config chan%d\n",
3533 		    sc->sc_config.chan);
3534 		return (err);
3535 	}
3536 
3537 	/* obtain current temperature of chipset */
3538 	sc->sc_tempera = iwk_curr_tempera(sc);
3539 
3540 	/* make Tx power calibration to determine the gains of DSP and radio */
3541 	err = iwk_tx_power_calibration(sc);
3542 	if (err) {
3543 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3544 		    "failed to set tx power table\n");
3545 		return (err);
3546 	}
3547 
3548 	/* add default AP node */
3549 	(void) memset(&node, 0, sizeof (node));
3550 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3551 	node.id = IWK_AP_ID;
3552 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3553 	if (err != IWK_SUCCESS) {
3554 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3555 		    "failed to add BSS node\n");
3556 		return (err);
3557 	}
3558 
3559 	/* TX_LINK_QUALITY cmd */
3560 	(void) memset(&link_quality, 0, sizeof (link_quality));
3561 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3562 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3563 		if (i < rs.ir_nrates)
3564 			rate = rs.ir_rates[rs.ir_nrates - i];
3565 		else
3566 			rate = 2;
3567 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3568 			masks |= RATE_MCS_CCK_MSK;
3569 		masks |= RATE_MCS_ANT_B_MSK;
3570 		masks &= ~RATE_MCS_ANT_A_MSK;
3571 		link_quality.rate_n_flags[i] =
3572 		    iwk_rate_to_plcp(rate) | masks;
3573 	}
3574 
3575 	link_quality.general_params.single_stream_ant_msk = 2;
3576 	link_quality.general_params.dual_stream_ant_msk = 3;
3577 	link_quality.agg_params.agg_dis_start_th = 3;
3578 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3579 	link_quality.sta_id = IWK_AP_ID;
3580 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3581 	    sizeof (link_quality), 1);
3582 	if (err != IWK_SUCCESS) {
3583 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3584 		    "failed to config link quality table\n");
3585 		return (err);
3586 	}
3587 
3588 	return (IWK_SUCCESS);
3589 }
3590 
3591 /*
3592  * Send a scan request(assembly scan cmd) to the firmware.
3593  */
3594 static int
3595 iwk_scan(iwk_sc_t *sc)
3596 {
3597 	ieee80211com_t *ic = &sc->sc_ic;
3598 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3599 	iwk_tx_desc_t *desc;
3600 	iwk_tx_data_t *data;
3601 	iwk_cmd_t *cmd;
3602 	iwk_scan_hdr_t *hdr;
3603 	iwk_scan_chan_t *chan;
3604 	struct ieee80211_frame *wh;
3605 	ieee80211_node_t *in = ic->ic_bss;
3606 	uint8_t essid[IEEE80211_NWID_LEN+1];
3607 	struct ieee80211_rateset *rs;
3608 	enum ieee80211_phymode mode;
3609 	uint8_t *frm;
3610 	int i, pktlen, nrates;
3611 
3612 	data = &ring->data[ring->cur];
3613 	desc = data->desc;
3614 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3615 
3616 	cmd->hdr.type = REPLY_SCAN_CMD;
3617 	cmd->hdr.flags = 0;
3618 	cmd->hdr.qid = ring->qid;
3619 	cmd->hdr.idx = ring->cur | 0x40;
3620 
3621 	hdr = (iwk_scan_hdr_t *)cmd->data;
3622 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3623 	hdr->nchan = 1;
3624 	hdr->quiet_time = LE_16(50);
3625 	hdr->quiet_plcp_th = LE_16(1);
3626 
3627 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3628 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3629 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3630 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3631 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3632 
3633 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3634 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3635 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3636 	hdr->tx_cmd.tx_flags |= (0x200);
3637 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3638 	hdr->tx_cmd.rate.r.rate_n_flags |=
3639 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3640 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3641 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3642 
3643 	if (ic->ic_des_esslen) {
3644 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3645 		essid[ic->ic_des_esslen] = '\0';
3646 		IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3647 
3648 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3649 		    ic->ic_des_esslen);
3650 	} else {
3651 		bzero(hdr->direct_scan[0].ssid,
3652 		    sizeof (hdr->direct_scan[0].ssid));
3653 	}
3654 	/*
3655 	 * a probe request frame is required after the REPLY_SCAN_CMD
3656 	 */
3657 	wh = (struct ieee80211_frame *)(hdr + 1);
3658 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3659 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3660 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3661 	(void) memset(wh->i_addr1, 0xff, 6);
3662 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3663 	(void) memset(wh->i_addr3, 0xff, 6);
3664 	*(uint16_t *)&wh->i_dur[0] = 0;
3665 	*(uint16_t *)&wh->i_seq[0] = 0;
3666 
3667 	frm = (uint8_t *)(wh + 1);
3668 
3669 	/* essid IE */
3670 	if (in->in_esslen) {
3671 		bcopy(in->in_essid, essid, in->in_esslen);
3672 		essid[in->in_esslen] = '\0';
3673 		IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3674 		    essid));
3675 	}
3676 	*frm++ = IEEE80211_ELEMID_SSID;
3677 	*frm++ = in->in_esslen;
3678 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3679 	frm += in->in_esslen;
3680 
3681 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3682 	rs = &ic->ic_sup_rates[mode];
3683 
3684 	/* supported rates IE */
3685 	*frm++ = IEEE80211_ELEMID_RATES;
3686 	nrates = rs->ir_nrates;
3687 	if (nrates > IEEE80211_RATE_SIZE)
3688 		nrates = IEEE80211_RATE_SIZE;
3689 	*frm++ = (uint8_t)nrates;
3690 	(void) memcpy(frm, rs->ir_rates, nrates);
3691 	frm += nrates;
3692 
3693 	/* supported xrates IE */
3694 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3695 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3696 		*frm++ = IEEE80211_ELEMID_XRATES;
3697 		*frm++ = (uint8_t)nrates;
3698 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3699 		frm += nrates;
3700 	}
3701 
3702 	/* optionnal IE (usually for wpa) */
3703 	if (ic->ic_opt_ie != NULL) {
3704 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3705 		frm += ic->ic_opt_ie_len;
3706 	}
3707 
3708 	/* setup length of probe request */
3709 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3710 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3711 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3712 
3713 	/*
3714 	 * the attribute of the scan channels are required after the probe
3715 	 * request frame.
3716 	 */
3717 	chan = (iwk_scan_chan_t *)frm;
3718 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3719 		if (ic->ic_des_esslen) {
3720 			chan->type = 3;
3721 		} else {
3722 			chan->type = 1;
3723 		}
3724 
3725 		chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3726 		chan->tpc.tx_gain = 0x3f;
3727 		chan->tpc.dsp_atten = 110;
3728 		chan->active_dwell = LE_16(50);
3729 		chan->passive_dwell = LE_16(120);
3730 
3731 		frm += sizeof (iwk_scan_chan_t);
3732 	}
3733 
3734 	pktlen = _PTRDIFF(frm, cmd);
3735 
3736 	(void) memset(desc, 0, sizeof (*desc));
3737 	desc->val0 = LE_32(1 << 24);
3738 	desc->pa[0].tb1_addr =
3739 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3740 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3741 
3742 	/*
3743 	 * maybe for cmd, filling the byte cnt table is not necessary.
3744 	 * anyway, we fill it here.
3745 	 */
3746 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3747 	    tfd_offset[ring->cur].val = 8;
3748 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3749 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3750 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3751 	}
3752 
3753 	/* kick cmd ring */
3754 	ring->cur = (ring->cur + 1) % ring->count;
3755 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3756 
3757 	return (IWK_SUCCESS);
3758 }
3759 
3760 static int
3761 iwk_config(iwk_sc_t *sc)
3762 {
3763 	ieee80211com_t *ic = &sc->sc_ic;
3764 	iwk_powertable_cmd_t powertable;
3765 	iwk_bt_cmd_t bt;
3766 	iwk_add_sta_t node;
3767 	iwk_link_quality_cmd_t link_quality;
3768 	int i, err;
3769 	uint16_t masks = 0;
3770 
3771 	/*
3772 	 * set power mode. Disable power management at present, do it later
3773 	 */
3774 	(void) memset(&powertable, 0, sizeof (powertable));
3775 	powertable.flags = LE_16(0x8);
3776 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3777 	    sizeof (powertable), 0);
3778 	if (err != IWK_SUCCESS) {
3779 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3780 		return (err);
3781 	}
3782 
3783 	/* configure bt coexistence */
3784 	(void) memset(&bt, 0, sizeof (bt));
3785 	bt.flags = 3;
3786 	bt.lead_time = 0xaa;
3787 	bt.max_kill = 1;
3788 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3789 	    sizeof (bt), 0);
3790 	if (err != IWK_SUCCESS) {
3791 		cmn_err(CE_WARN,
3792 		    "iwk_config(): "
3793 		    "failed to configurate bt coexistence\n");
3794 		return (err);
3795 	}
3796 
3797 	/* configure rxon */
3798 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3799 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3800 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3801 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3802 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3803 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3804 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3805 	switch (ic->ic_opmode) {
3806 	case IEEE80211_M_STA:
3807 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3808 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3809 		    RXON_FILTER_DIS_DECRYPT_MSK |
3810 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3811 		break;
3812 	case IEEE80211_M_IBSS:
3813 	case IEEE80211_M_AHDEMO:
3814 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3815 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3816 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3817 		    RXON_FILTER_DIS_DECRYPT_MSK |
3818 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3819 		break;
3820 	case IEEE80211_M_HOSTAP:
3821 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3822 		break;
3823 	case IEEE80211_M_MONITOR:
3824 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3825 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3826 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3827 		break;
3828 	}
3829 	sc->sc_config.cck_basic_rates  = 0x0f;
3830 	sc->sc_config.ofdm_basic_rates = 0xff;
3831 
3832 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3833 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3834 
3835 	/* set antenna */
3836 
3837 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3838 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3839 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3840 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3841 
3842 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3843 	    sizeof (iwk_rxon_cmd_t), 0);
3844 	if (err != IWK_SUCCESS) {
3845 		cmn_err(CE_WARN, "iwk_config(): "
3846 		    "failed to set configure command\n");
3847 		return (err);
3848 	}
3849 	/* obtain current temperature of chipset */
3850 	sc->sc_tempera = iwk_curr_tempera(sc);
3851 
3852 	/* make Tx power calibration to determine the gains of DSP and radio */
3853 	err = iwk_tx_power_calibration(sc);
3854 	if (err) {
3855 		cmn_err(CE_WARN, "iwk_config(): "
3856 		    "failed to set tx power table\n");
3857 		return (err);
3858 	}
3859 
3860 	/* add broadcast node so that we can send broadcast frame */
3861 	(void) memset(&node, 0, sizeof (node));
3862 	(void) memset(node.bssid, 0xff, 6);
3863 	node.id = IWK_BROADCAST_ID;
3864 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3865 	if (err != IWK_SUCCESS) {
3866 		cmn_err(CE_WARN, "iwk_config(): "
3867 		    "failed to add broadcast node\n");
3868 		return (err);
3869 	}
3870 
3871 	/* TX_LINK_QUALITY cmd ? */
3872 	(void) memset(&link_quality, 0, sizeof (link_quality));
3873 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3874 		masks |= RATE_MCS_CCK_MSK;
3875 		masks |= RATE_MCS_ANT_B_MSK;
3876 		masks &= ~RATE_MCS_ANT_A_MSK;
3877 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3878 	}
3879 
3880 	link_quality.general_params.single_stream_ant_msk = 2;
3881 	link_quality.general_params.dual_stream_ant_msk = 3;
3882 	link_quality.agg_params.agg_dis_start_th = 3;
3883 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3884 	link_quality.sta_id = IWK_BROADCAST_ID;
3885 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3886 	    sizeof (link_quality), 0);
3887 	if (err != IWK_SUCCESS) {
3888 		cmn_err(CE_WARN, "iwk_config(): "
3889 		    "failed to config link quality table\n");
3890 		return (err);
3891 	}
3892 
3893 	return (IWK_SUCCESS);
3894 }
3895 
3896 static void
3897 iwk_stop_master(iwk_sc_t *sc)
3898 {
3899 	uint32_t tmp;
3900 	int n;
3901 
3902 	tmp = IWK_READ(sc, CSR_RESET);
3903 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3904 
3905 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3906 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3907 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3908 		return;
3909 
3910 	for (n = 0; n < 2000; n++) {
3911 		if (IWK_READ(sc, CSR_RESET) &
3912 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3913 			break;
3914 		DELAY(1000);
3915 	}
3916 	if (n == 2000)
3917 		IWK_DBG((IWK_DEBUG_HW,
3918 		    "timeout waiting for master stop\n"));
3919 }
3920 
3921 static int
3922 iwk_power_up(iwk_sc_t *sc)
3923 {
3924 	uint32_t tmp;
3925 
3926 	iwk_mac_access_enter(sc);
3927 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3928 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3929 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3930 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3931 	iwk_mac_access_exit(sc);
3932 
3933 	DELAY(5000);
3934 	return (IWK_SUCCESS);
3935 }
3936 
3937 static int
3938 iwk_preinit(iwk_sc_t *sc)
3939 {
3940 	uint32_t tmp;
3941 	int n;
3942 	uint8_t vlink;
3943 
3944 	/* clear any pending interrupts */
3945 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3946 
3947 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3948 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3949 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3950 
3951 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3952 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3953 
3954 	/* wait for clock ready */
3955 	for (n = 0; n < 1000; n++) {
3956 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3957 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3958 			break;
3959 		DELAY(10);
3960 	}
3961 	if (n == 1000) {
3962 		cmn_err(CE_WARN,
3963 		    "iwk_preinit(): timeout waiting for clock ready\n");
3964 		return (ETIMEDOUT);
3965 	}
3966 	iwk_mac_access_enter(sc);
3967 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3968 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3969 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3970 
3971 	DELAY(20);
3972 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3973 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3974 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3975 	iwk_mac_access_exit(sc);
3976 
3977 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3978 
3979 	(void) iwk_power_up(sc);
3980 
3981 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3982 		tmp = ddi_get32(sc->sc_cfg_handle,
3983 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3984 		ddi_put32(sc->sc_cfg_handle,
3985 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3986 		    tmp & ~(1 << 11));
3987 	}
3988 
3989 
3990 	vlink = ddi_get8(sc->sc_cfg_handle,
3991 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3992 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3993 	    vlink & ~2);
3994 
3995 	tmp = IWK_READ(sc, CSR_SW_VER);
3996 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3997 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3998 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3999 	IWK_WRITE(sc, CSR_SW_VER, tmp);
4000 
4001 	/* make sure power supply on each part of the hardware */
4002 	iwk_mac_access_enter(sc);
4003 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
4004 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4005 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4006 	DELAY(5);
4007 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
4008 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4009 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4010 	iwk_mac_access_exit(sc);
4011 	return (IWK_SUCCESS);
4012 }
4013 
4014 /*
4015  * set up semphore flag to own EEPROM
4016  */
4017 static int iwk_eep_sem_down(iwk_sc_t *sc)
4018 {
4019 	int count1, count2;
4020 	uint32_t tmp;
4021 
4022 	for (count1 = 0; count1 < 1000; count1++) {
4023 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4024 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4025 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4026 
4027 		for (count2 = 0; count2 < 2; count2++) {
4028 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
4029 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
4030 				return (IWK_SUCCESS);
4031 			DELAY(10000);
4032 		}
4033 	}
4034 	return (IWK_FAIL);
4035 }
4036 
4037 /*
4038  * reset semphore flag to release EEPROM
4039  */
4040 static void iwk_eep_sem_up(iwk_sc_t *sc)
4041 {
4042 	uint32_t tmp;
4043 
4044 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4045 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4046 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4047 }
4048 
4049 /*
4050  * This function load all infomation in eeprom into iwk_eep
4051  * structure in iwk_sc_t structure
4052  */
4053 static int iwk_eep_load(iwk_sc_t *sc)
4054 {
4055 	int i, rr;
4056 	uint32_t rv, tmp, eep_gp;
4057 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4058 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4059 
4060 	/* read eeprom gp register in CSR */
4061 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
4062 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4063 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
4064 		cmn_err(CE_WARN, "EEPROM not found\n");
4065 		return (IWK_FAIL);
4066 	}
4067 
4068 	rr = iwk_eep_sem_down(sc);
4069 	if (rr != 0) {
4070 		cmn_err(CE_WARN, "failed to own EEPROM\n");
4071 		return (IWK_FAIL);
4072 	}
4073 
4074 	for (addr = 0; addr < eep_sz; addr += 2) {
4075 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4076 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
4077 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4078 
4079 		for (i = 0; i < 10; i++) {
4080 			rv = IWK_READ(sc, CSR_EEPROM_REG);
4081 			if (rv & 1)
4082 				break;
4083 			DELAY(10);
4084 		}
4085 
4086 		if (!(rv & 1)) {
4087 			cmn_err(CE_WARN, "time out when read EEPROM\n");
4088 			iwk_eep_sem_up(sc);
4089 			return (IWK_FAIL);
4090 		}
4091 
4092 		eep_p[addr/2] = rv >> 16;
4093 	}
4094 
4095 	iwk_eep_sem_up(sc);
4096 	return (IWK_SUCCESS);
4097 }
4098 
4099 /*
4100  * init mac address in ieee80211com_t struct
4101  */
4102 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
4103 {
4104 	ieee80211com_t *ic = &sc->sc_ic;
4105 	struct iwk_eep *ep = &sc->sc_eep_map;
4106 
4107 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
4108 
4109 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4110 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4111 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4112 }
4113 
4114 static int
4115 iwk_init(iwk_sc_t *sc)
4116 {
4117 	int qid, n, err;
4118 	clock_t clk;
4119 	uint32_t tmp;
4120 
4121 	mutex_enter(&sc->sc_glock);
4122 	sc->sc_flags &= ~IWK_F_FW_INIT;
4123 
4124 	(void) iwk_preinit(sc);
4125 
4126 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
4127 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
4128 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
4129 		goto fail1;
4130 	}
4131 
4132 	/* init Rx ring */
4133 	iwk_mac_access_enter(sc);
4134 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
4135 
4136 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
4137 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
4138 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
4139 
4140 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
4141 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
4142 	    offsetof(struct iwk_shared, val0)) >> 4));
4143 
4144 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
4145 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
4146 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
4147 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
4148 	    (RX_QUEUE_SIZE_LOG <<
4149 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
4150 	iwk_mac_access_exit(sc);
4151 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
4152 	    (RX_QUEUE_SIZE - 1) & ~0x7);
4153 
4154 	/* init Tx rings */
4155 	iwk_mac_access_enter(sc);
4156 	iwk_reg_write(sc, SCD_TXFACT, 0);
4157 
4158 	/* keep warm page */
4159 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
4160 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
4161 
4162 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
4163 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
4164 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
4165 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
4166 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4167 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
4168 	}
4169 	iwk_mac_access_exit(sc);
4170 
4171 	/* clear "radio off" and "disable command" bits */
4172 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4173 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
4174 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4175 
4176 	/* clear any pending interrupts */
4177 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
4178 
4179 	/* enable interrupts */
4180 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
4181 
4182 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4183 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4184 
4185 	/*
4186 	 * backup ucode data part for future use.
4187 	 */
4188 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4189 	    sc->sc_dma_fw_data.mem_va,
4190 	    sc->sc_dma_fw_data.alength);
4191 
4192 	for (n = 0; n < 2; n++) {
4193 		/* load firmware init segment into NIC */
4194 		err = iwk_load_firmware(sc);
4195 		if (err != IWK_SUCCESS) {
4196 			cmn_err(CE_WARN, "iwk_init(): "
4197 			    "failed to setup boot firmware\n");
4198 			continue;
4199 		}
4200 
4201 		/* now press "execute" start running */
4202 		IWK_WRITE(sc, CSR_RESET, 0);
4203 		break;
4204 	}
4205 	if (n == 2) {
4206 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
4207 		goto fail1;
4208 	}
4209 	/* ..and wait at most one second for adapter to initialize */
4210 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
4211 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
4212 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
4213 			break;
4214 	}
4215 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
4216 		cmn_err(CE_WARN,
4217 		    "iwk_init(): timeout waiting for firmware init\n");
4218 		goto fail1;
4219 	}
4220 
4221 	/*
4222 	 * at this point, the firmware is loaded OK, then config the hardware
4223 	 * with the ucode API, including rxon, txpower, etc.
4224 	 */
4225 	err = iwk_config(sc);
4226 	if (err) {
4227 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
4228 		goto fail1;
4229 	}
4230 
4231 	/* at this point, hardware may receive beacons :) */
4232 	mutex_exit(&sc->sc_glock);
4233 	return (IWK_SUCCESS);
4234 
4235 fail1:
4236 	err = IWK_FAIL;
4237 	mutex_exit(&sc->sc_glock);
4238 	return (err);
4239 }
4240 
4241 static void
4242 iwk_stop(iwk_sc_t *sc)
4243 {
4244 	uint32_t tmp;
4245 	int i;
4246 
4247 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4248 		mutex_enter(&sc->sc_glock);
4249 
4250 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4251 	/* disable interrupts */
4252 	IWK_WRITE(sc, CSR_INT_MASK, 0);
4253 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4254 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4255 
4256 	/* reset all Tx rings */
4257 	for (i = 0; i < IWK_NUM_QUEUES; i++)
4258 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
4259 
4260 	/* reset Rx ring */
4261 	iwk_reset_rx_ring(sc);
4262 
4263 	iwk_mac_access_enter(sc);
4264 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4265 	iwk_mac_access_exit(sc);
4266 
4267 	DELAY(5);
4268 
4269 	iwk_stop_master(sc);
4270 
4271 	sc->sc_tx_timer = 0;
4272 	sc->sc_flags &= ~IWK_F_SCANNING;
4273 	sc->sc_scan_pending = 0;
4274 
4275 	tmp = IWK_READ(sc, CSR_RESET);
4276 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4277 
4278 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4279 		mutex_exit(&sc->sc_glock);
4280 }
4281 
4282 /*
4283  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4284  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4285  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4286  * INRIA Sophia - Projet Planete
4287  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4288  */
4289 #define	is_success(amrr)	\
4290 	((amrr)->retrycnt < (amrr)->txcnt / 10)
4291 #define	is_failure(amrr)	\
4292 	((amrr)->retrycnt > (amrr)->txcnt / 3)
4293 #define	is_enough(amrr)		\
4294 	((amrr)->txcnt > 100)
4295 #define	is_min_rate(in)		\
4296 	((in)->in_txrate == 0)
4297 #define	is_max_rate(in)		\
4298 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
4299 #define	increase_rate(in)	\
4300 	((in)->in_txrate++)
4301 #define	decrease_rate(in)	\
4302 	((in)->in_txrate--)
4303 #define	reset_cnt(amrr)		\
4304 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
4305 
4306 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
4307 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
4308 
4309 static void
4310 iwk_amrr_init(iwk_amrr_t *amrr)
4311 {
4312 	amrr->success = 0;
4313 	amrr->recovery = 0;
4314 	amrr->txcnt = amrr->retrycnt = 0;
4315 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4316 }
4317 
4318 static void
4319 iwk_amrr_timeout(iwk_sc_t *sc)
4320 {
4321 	ieee80211com_t *ic = &sc->sc_ic;
4322 
4323 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
4324 	if (ic->ic_opmode == IEEE80211_M_STA)
4325 		iwk_amrr_ratectl(NULL, ic->ic_bss);
4326 	else
4327 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
4328 	sc->sc_clk = ddi_get_lbolt();
4329 }
4330 
4331 /* ARGSUSED */
4332 static void
4333 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
4334 {
4335 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
4336 	int need_change = 0;
4337 
4338 	if (is_success(amrr) && is_enough(amrr)) {
4339 		amrr->success++;
4340 		if (amrr->success >= amrr->success_threshold &&
4341 		    !is_max_rate(in)) {
4342 			amrr->recovery = 1;
4343 			amrr->success = 0;
4344 			increase_rate(in);
4345 			IWK_DBG((IWK_DEBUG_RATECTL,
4346 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4347 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4348 			need_change = 1;
4349 		} else {
4350 			amrr->recovery = 0;
4351 		}
4352 	} else if (is_failure(amrr)) {
4353 		amrr->success = 0;
4354 		if (!is_min_rate(in)) {
4355 			if (amrr->recovery) {
4356 				amrr->success_threshold++;
4357 				if (amrr->success_threshold >
4358 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4359 					amrr->success_threshold =
4360 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4361 			} else {
4362 				amrr->success_threshold =
4363 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4364 			}
4365 			decrease_rate(in);
4366 			IWK_DBG((IWK_DEBUG_RATECTL,
4367 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4368 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4369 			need_change = 1;
4370 		}
4371 		amrr->recovery = 0;	/* paper is incorrect */
4372 	}
4373 
4374 	if (is_enough(amrr) || need_change)
4375 		reset_cnt(amrr);
4376 }
4377 
4378 /*
4379  * calculate 4965 chipset's kelvin temperature according to
4380  * the data of init alive and satistics notification.
4381  * The details is described in iwk_calibration.h file
4382  */
4383 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4384 {
4385 	int32_t  tempera;
4386 	int32_t  r1, r2, r3;
4387 	uint32_t  r4_u;
4388 	int32_t   r4_s;
4389 
4390 	if (iwk_is_fat_channel(sc)) {
4391 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4392 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4393 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4394 		r4_u = sc->sc_card_alive_init.therm_r4[1];
4395 	} else {
4396 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4397 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4398 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4399 		r4_u = sc->sc_card_alive_init.therm_r4[0];
4400 	}
4401 
4402 	if (sc->sc_flags & IWK_F_STATISTICS) {
4403 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4404 		    (31-23)) >> (31-23);
4405 	} else {
4406 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4407 	}
4408 
4409 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4410 	    r1, r2, r3, r4_s));
4411 
4412 	if (r3 == r1) {
4413 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
4414 		    "failed to calculate temperature"
4415 		    "because r3 = r1\n");
4416 		return (DDI_FAILURE);
4417 	}
4418 
4419 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4420 	tempera /= (r3 - r1);
4421 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4422 
4423 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4424 	    tempera, KELVIN_TO_CELSIUS(tempera)));
4425 
4426 	return (tempera);
4427 }
4428 
4429 /* Determine whether 4965 is using 2.4 GHz band */
4430 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4431 {
4432 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4433 }
4434 
4435 /* Determine whether 4965 is using fat channel */
4436 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4437 {
4438 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4439 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4440 }
4441 
4442 /*
4443  * In MIMO mode, determine which group 4965's current channel belong to.
4444  * For more infomation about "channel group",
4445  * please refer to iwk_calibration.h file
4446  */
4447 static int iwk_txpower_grp(uint16_t channel)
4448 {
4449 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4450 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4451 		return (CALIB_CH_GROUP_5);
4452 	}
4453 
4454 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4455 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4456 		return (CALIB_CH_GROUP_1);
4457 	}
4458 
4459 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4460 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4461 		return (CALIB_CH_GROUP_2);
4462 	}
4463 
4464 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4465 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4466 		return (CALIB_CH_GROUP_3);
4467 	}
4468 
4469 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4470 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4471 		return (CALIB_CH_GROUP_4);
4472 	}
4473 
4474 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
4475 	    "can't find txpower group for channel %d.\n", channel);
4476 
4477 	return (DDI_FAILURE);
4478 }
4479 
4480 /* 2.4 GHz */
4481 static uint16_t iwk_eep_band_1[14] = {
4482 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4483 };
4484 
4485 /* 5.2 GHz bands */
4486 static uint16_t iwk_eep_band_2[13] = {
4487 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4488 };
4489 
4490 static uint16_t iwk_eep_band_3[12] = {
4491 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4492 };
4493 
4494 static uint16_t iwk_eep_band_4[11] = {
4495 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4496 };
4497 
4498 static uint16_t iwk_eep_band_5[6] = {
4499 	145, 149, 153, 157, 161, 165
4500 };
4501 
4502 static uint16_t iwk_eep_band_6[7] = {
4503 	1, 2, 3, 4, 5, 6, 7
4504 };
4505 
4506 static uint16_t iwk_eep_band_7[11] = {
4507 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4508 };
4509 
4510 /* Get regulatory data from eeprom for a given channel */
4511 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4512     uint16_t channel,
4513     int is_24G, int is_fat, int is_hi_chan)
4514 {
4515 	int32_t i;
4516 	uint16_t chan;
4517 
4518 	if (is_fat) {  /* 11n mode */
4519 
4520 		if (is_hi_chan) {
4521 			chan = channel - 4;
4522 		} else {
4523 			chan = channel;
4524 		}
4525 
4526 		for (i = 0; i < 7; i++) {
4527 			if (iwk_eep_band_6[i] == chan) {
4528 				return (&sc->sc_eep_map.band_24_channels[i]);
4529 			}
4530 		}
4531 		for (i = 0; i < 11; i++) {
4532 			if (iwk_eep_band_7[i] == chan) {
4533 				return (&sc->sc_eep_map.band_52_channels[i]);
4534 			}
4535 		}
4536 	} else if (is_24G) {  /* 2.4 GHz band */
4537 		for (i = 0; i < 14; i++) {
4538 			if (iwk_eep_band_1[i] == channel) {
4539 				return (&sc->sc_eep_map.band_1_channels[i]);
4540 			}
4541 		}
4542 	} else {  /* 5 GHz band */
4543 		for (i = 0; i < 13; i++) {
4544 			if (iwk_eep_band_2[i] == channel) {
4545 				return (&sc->sc_eep_map.band_2_channels[i]);
4546 			}
4547 		}
4548 		for (i = 0; i < 12; i++) {
4549 			if (iwk_eep_band_3[i] == channel) {
4550 				return (&sc->sc_eep_map.band_3_channels[i]);
4551 			}
4552 		}
4553 		for (i = 0; i < 11; i++) {
4554 			if (iwk_eep_band_4[i] == channel) {
4555 				return (&sc->sc_eep_map.band_4_channels[i]);
4556 			}
4557 		}
4558 		for (i = 0; i < 6; i++) {
4559 			if (iwk_eep_band_5[i] == channel) {
4560 				return (&sc->sc_eep_map.band_5_channels[i]);
4561 			}
4562 		}
4563 	}
4564 
4565 	return (NULL);
4566 }
4567 
4568 /*
4569  * Determine which subband a given channel belongs
4570  * to in 2.4 GHz or 5 GHz band
4571  */
4572 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4573 {
4574 	int32_t b_n = -1;
4575 
4576 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4577 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4578 			continue;
4579 		}
4580 
4581 		if ((channel >=
4582 		    (uint16_t)sc->sc_eep_map.calib_info.
4583 		    band_info_tbl[b_n].ch_from) &&
4584 		    (channel <=
4585 		    (uint16_t)sc->sc_eep_map.calib_info.
4586 		    band_info_tbl[b_n].ch_to)) {
4587 			break;
4588 		}
4589 	}
4590 
4591 	return (b_n);
4592 }
4593 
4594 /* Make a special division for interpolation operation */
4595 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4596 {
4597 	int32_t sign = 1;
4598 
4599 	if (num < 0) {
4600 		sign = -sign;
4601 		num = -num;
4602 	}
4603 
4604 	if (denom < 0) {
4605 		sign = -sign;
4606 		denom = -denom;
4607 	}
4608 
4609 	*res = ((num*2 + denom) / (denom*2)) * sign;
4610 
4611 	return (IWK_SUCCESS);
4612 }
4613 
4614 /* Make interpolation operation */
4615 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4616     int32_t x2, int32_t y2)
4617 {
4618 	int32_t val;
4619 
4620 	if (x2 == x1) {
4621 		return (y1);
4622 	} else {
4623 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4624 		return (val + y2);
4625 	}
4626 }
4627 
4628 /* Get interpolation measurement data of a given channel for all chains. */
4629 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4630     struct iwk_eep_calib_channel_info *chan_info)
4631 {
4632 	int32_t ban_n;
4633 	uint32_t ch1_n, ch2_n;
4634 	int32_t c, m;
4635 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4636 
4637 	/* determine subband number */
4638 	ban_n = iwk_band_number(sc, channel);
4639 	if (ban_n >= EEP_TX_POWER_BANDS) {
4640 		return (DDI_FAILURE);
4641 	}
4642 
4643 	ch1_n =
4644 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4645 	ch2_n =
4646 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4647 
4648 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4649 
4650 	/*
4651 	 * go through all chains on chipset
4652 	 */
4653 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4654 		/*
4655 		 * go through all factory measurements
4656 		 */
4657 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4658 			m1_p =
4659 			    &(sc->sc_eep_map.calib_info.
4660 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4661 			m2_p =
4662 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4663 			    ch2.measure[c][m]);
4664 			m_p = &(chan_info->measure[c][m]);
4665 
4666 			/*
4667 			 * make interpolation to get actual
4668 			 * Tx power for given channel
4669 			 */
4670 			m_p->actual_pow = iwk_interpolate_value(channel,
4671 			    ch1_n, m1_p->actual_pow,
4672 			    ch2_n, m2_p->actual_pow);
4673 
4674 			/* make interpolation to get index into gain table */
4675 			m_p->gain_idx = iwk_interpolate_value(channel,
4676 			    ch1_n, m1_p->gain_idx,
4677 			    ch2_n, m2_p->gain_idx);
4678 
4679 			/* make interpolation to get chipset temperature */
4680 			m_p->temperature = iwk_interpolate_value(channel,
4681 			    ch1_n, m1_p->temperature,
4682 			    ch2_n, m2_p->temperature);
4683 
4684 			/*
4685 			 * make interpolation to get power
4686 			 * amp detector level
4687 			 */
4688 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4689 			    m1_p->pa_det,
4690 			    ch2_n, m2_p->pa_det);
4691 		}
4692 	}
4693 
4694 	return (IWK_SUCCESS);
4695 }
4696 
4697 /*
4698  * Calculate voltage compensation for Tx power. For more infomation,
4699  * please refer to iwk_calibration.h file
4700  */
4701 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4702     int32_t curr_voltage)
4703 {
4704 	int32_t vol_comp = 0;
4705 
4706 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4707 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4708 		return (vol_comp);
4709 	}
4710 
4711 	(void) iwk_division(curr_voltage-eep_voltage,
4712 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4713 
4714 	if (curr_voltage > eep_voltage) {
4715 		vol_comp *= 2;
4716 	}
4717 	if ((vol_comp < -2) || (vol_comp > 2)) {
4718 		vol_comp = 0;
4719 	}
4720 
4721 	return (vol_comp);
4722 }
4723 
4724 /*
4725  * Thermal compensation values for txpower for various frequency ranges ...
4726  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4727  */
4728 static struct iwk_txpower_tempera_comp {
4729 	int32_t degrees_per_05db_a;
4730 	int32_t degrees_per_05db_a_denom;
4731 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4732 	{9, 2},			/* group 0 5.2, ch  34-43 */
4733 	{4, 1},			/* group 1 5.2, ch  44-70 */
4734 	{4, 1},			/* group 2 5.2, ch  71-124 */
4735 	{4, 1},			/* group 3 5.2, ch 125-200 */
4736 	{3, 1}			/* group 4 2.4, ch   all */
4737 };
4738 
4739 /*
4740  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4741  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4742  */
4743 static int32_t back_off_table[] = {
4744 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4745 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4746 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4747 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4748 	10			/* CCK */
4749 };
4750 
4751 /* determine minimum Tx power index in gain table */
4752 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4753 {
4754 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4755 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4756 	}
4757 
4758 	return (MIN_TX_GAIN_INDEX);
4759 }
4760 
4761 /*
4762  * Determine DSP and radio gain according to temperature and other factors.
4763  * This function is the majority of Tx power calibration
4764  */
4765 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4766     struct iwk_tx_power_db *tp_db)
4767 {
4768 	int is_24G, is_fat, is_high_chan, is_mimo;
4769 	int c, r;
4770 	int32_t target_power;
4771 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4772 	uint16_t channel;
4773 	uint8_t saturation_power;
4774 	int32_t regu_power;
4775 	int32_t curr_regu_power;
4776 	struct iwk_eep_channel *eep_chan_p;
4777 	struct iwk_eep_calib_channel_info eep_chan_calib;
4778 	int32_t eep_voltage, init_voltage;
4779 	int32_t voltage_compensation;
4780 	int32_t temperature;
4781 	int32_t degrees_per_05db_num;
4782 	int32_t degrees_per_05db_denom;
4783 	struct iwk_eep_calib_measure *measure_p;
4784 	int32_t interpo_temp;
4785 	int32_t power_limit;
4786 	int32_t atten_value;
4787 	int32_t tempera_comp[2];
4788 	int32_t interpo_gain_idx[2];
4789 	int32_t interpo_actual_pow[2];
4790 	union iwk_tx_power_dual_stream txpower_gains;
4791 	int32_t txpower_gains_idx;
4792 
4793 	channel = sc->sc_config.chan;
4794 
4795 	/* 2.4 GHz or 5 GHz band */
4796 	is_24G = iwk_is_24G_band(sc);
4797 
4798 	/* fat channel or not */
4799 	is_fat = iwk_is_fat_channel(sc);
4800 
4801 	/*
4802 	 * using low half channel number or high half channel number
4803 	 * identify fat channel
4804 	 */
4805 	if (is_fat && (sc->sc_config.flags &
4806 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4807 		is_high_chan = 1;
4808 	}
4809 
4810 	if ((channel > 0) && (channel < 200)) {
4811 		/* get regulatory channel data from eeprom */
4812 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4813 		    is_fat, is_high_chan);
4814 		if (NULL == eep_chan_p) {
4815 			cmn_err(CE_WARN,
4816 			    "iwk_txpower_table_cmd_init(): "
4817 			    "can't get channel infomation\n");
4818 			return (DDI_FAILURE);
4819 		}
4820 	} else {
4821 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4822 		    "channel(%d) isn't in proper range\n",
4823 		    channel);
4824 		return (DDI_FAILURE);
4825 	}
4826 
4827 	/* initial value of Tx power */
4828 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4829 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4830 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4831 		    "user TX power is too weak\n");
4832 		return (DDI_FAILURE);
4833 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4834 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4835 		    "user TX power is too strong\n");
4836 		return (DDI_FAILURE);
4837 	}
4838 
4839 	target_power = 2 * sc->sc_user_txpower;
4840 
4841 	/* determine which group current channel belongs to */
4842 	tx_grp = iwk_txpower_grp(channel);
4843 	if (tx_grp < 0) {
4844 		return (tx_grp);
4845 	}
4846 
4847 
4848 	if (is_fat) {
4849 		if (is_high_chan) {
4850 			channel -= 2;
4851 		} else {
4852 			channel += 2;
4853 		}
4854 	}
4855 
4856 	/* determine saturation power */
4857 	if (is_24G) {
4858 		saturation_power =
4859 		    sc->sc_eep_map.calib_info.saturation_power24;
4860 	} else {
4861 		saturation_power =
4862 		    sc->sc_eep_map.calib_info.saturation_power52;
4863 	}
4864 
4865 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4866 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4867 		if (is_24G) {
4868 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4869 		} else {
4870 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4871 		}
4872 	}
4873 
4874 	/* determine regulatory power */
4875 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4876 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4877 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4878 		if (is_24G) {
4879 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4880 		} else {
4881 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4882 		}
4883 	}
4884 
4885 	/*
4886 	 * get measurement data for current channel
4887 	 * suach as temperature,index to gain table,actual Tx power
4888 	 */
4889 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4890 
4891 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4892 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4893 
4894 	/* calculate voltage compensation to Tx power */
4895 	voltage_compensation =
4896 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4897 
4898 	if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4899 		temperature = sc->sc_tempera;
4900 	} else {
4901 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4902 	}
4903 	if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4904 		temperature = sc->sc_tempera;
4905 	} else {
4906 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4907 	}
4908 	temperature = KELVIN_TO_CELSIUS(temperature);
4909 
4910 	degrees_per_05db_num =
4911 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4912 	degrees_per_05db_denom =
4913 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4914 
4915 	for (c = 0; c < 2; c++) {  /* go through all chains */
4916 		measure_p = &eep_chan_calib.measure[c][1];
4917 		interpo_temp = measure_p->temperature;
4918 
4919 		/* determine temperature compensation to Tx power */
4920 		(void) iwk_division(
4921 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4922 		    degrees_per_05db_num, &tempera_comp[c]);
4923 
4924 		interpo_gain_idx[c] = measure_p->gain_idx;
4925 		interpo_actual_pow[c] = measure_p->actual_pow;
4926 	}
4927 
4928 	/*
4929 	 * go through all rate entries in Tx power table
4930 	 */
4931 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4932 		if (r & 0x8) {
4933 			/* need to lower regulatory power for MIMO mode */
4934 			curr_regu_power = regu_power -
4935 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4936 			is_mimo = 1;
4937 		} else {
4938 			curr_regu_power = regu_power;
4939 			is_mimo = 0;
4940 		}
4941 
4942 		power_limit = saturation_power - back_off_table[r];
4943 		if (power_limit > curr_regu_power) {
4944 			/* final Tx power limit */
4945 			power_limit = curr_regu_power;
4946 		}
4947 
4948 		if (target_power > power_limit) {
4949 			target_power = power_limit; /* final target Tx power */
4950 		}
4951 
4952 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4953 			if (is_mimo) {
4954 				atten_value =
4955 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4956 			} else {
4957 				atten_value = 0;
4958 			}
4959 
4960 			/*
4961 			 * calculate index in gain table
4962 			 * this step is very important
4963 			 */
4964 			txpower_gains_idx = interpo_gain_idx[c] -
4965 			    (target_power - interpo_actual_pow[c]) -
4966 			    tempera_comp[c] - voltage_compensation +
4967 			    atten_value;
4968 
4969 			if (txpower_gains_idx <
4970 			    iwk_min_power_index(r, is_24G)) {
4971 				txpower_gains_idx =
4972 				    iwk_min_power_index(r, is_24G);
4973 			}
4974 
4975 			if (!is_24G) {
4976 				/*
4977 				 * support negative index for 5 GHz
4978 				 * band
4979 				 */
4980 				txpower_gains_idx += 9;
4981 			}
4982 
4983 			if (POWER_TABLE_CCK_ENTRY == r) {
4984 				/* for CCK mode, make necessary attenuaton */
4985 				txpower_gains_idx +=
4986 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4987 			}
4988 
4989 			if (txpower_gains_idx > 107) {
4990 				txpower_gains_idx = 107;
4991 			} else if (txpower_gains_idx < 0) {
4992 				txpower_gains_idx = 0;
4993 			}
4994 
4995 			/* search DSP and radio gains in gain table */
4996 			txpower_gains.s.radio_tx_gain[c] =
4997 			    gains_table[is_24G][txpower_gains_idx].radio;
4998 			txpower_gains.s.dsp_predis_atten[c] =
4999 			    gains_table[is_24G][txpower_gains_idx].dsp;
5000 
5001 			IWK_DBG((IWK_DEBUG_CALIBRATION,
5002 			    "rate_index: %d, "
5003 			    "gain_index %d, c: %d,is_mimo: %d\n",
5004 			    r, txpower_gains_idx, c, is_mimo));
5005 		}
5006 
5007 		/* initialize Tx power table */
5008 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
5009 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
5010 		} else {
5011 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
5012 		}
5013 	}
5014 
5015 	return (IWK_SUCCESS);
5016 }
5017 
5018 /*
5019  * make Tx power calibration to adjust Tx power.
5020  * This is completed by sending out Tx power table command.
5021  */
5022 static int iwk_tx_power_calibration(iwk_sc_t *sc)
5023 {
5024 	iwk_tx_power_table_cmd_t cmd;
5025 	int rv;
5026 
5027 	if (sc->sc_flags & IWK_F_SCANNING) {
5028 		return (IWK_SUCCESS);
5029 	}
5030 
5031 	/* necessary initialization to Tx power table command */
5032 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
5033 	cmd.channel = sc->sc_config.chan;
5034 	cmd.channel_normal_width = 0;
5035 
5036 	/* initialize Tx power table */
5037 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
5038 	if (rv) {
5039 		cmn_err(CE_NOTE, "rv= %d\n", rv);
5040 		return (rv);
5041 	}
5042 
5043 	/* send out Tx power table command */
5044 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
5045 	if (rv) {
5046 		return (rv);
5047 	}
5048 
5049 	/* record current temperature */
5050 	sc->sc_last_tempera = sc->sc_tempera;
5051 
5052 	return (IWK_SUCCESS);
5053 }
5054 
5055 /* This function is the handler of statistics notification from uCode */
5056 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
5057 {
5058 	int is_diff;
5059 	struct iwk_notif_statistics *statistics_p =
5060 	    (struct iwk_notif_statistics *)(desc + 1);
5061 
5062 	mutex_enter(&sc->sc_glock);
5063 
5064 	is_diff = (sc->sc_statistics.general.temperature !=
5065 	    statistics_p->general.temperature) ||
5066 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
5067 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
5068 
5069 	/* update statistics data */
5070 	(void) memcpy(&sc->sc_statistics, statistics_p,
5071 	    sizeof (struct iwk_notif_statistics));
5072 
5073 	sc->sc_flags |= IWK_F_STATISTICS;
5074 
5075 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
5076 		/* make Receiver gain balance calibration */
5077 		(void) iwk_rxgain_diff(sc);
5078 
5079 		/* make Receiver sensitivity calibration */
5080 		(void) iwk_rx_sens(sc);
5081 	}
5082 
5083 
5084 	if (!is_diff) {
5085 		mutex_exit(&sc->sc_glock);
5086 		return;
5087 	}
5088 
5089 	/* calibration current temperature of 4965 chipset */
5090 	sc->sc_tempera = iwk_curr_tempera(sc);
5091 
5092 	/* distinct temperature change will trigger Tx power calibration */
5093 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
5094 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
5095 		/* make Tx power calibration */
5096 		(void) iwk_tx_power_calibration(sc);
5097 	}
5098 
5099 	mutex_exit(&sc->sc_glock);
5100 }
5101 
5102 /* Determine this station is in associated state or not */
5103 static int iwk_is_associated(iwk_sc_t *sc)
5104 {
5105 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
5106 }
5107 
5108 /* Make necessary preparation for Receiver gain balance calibration */
5109 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
5110 {
5111 	int i, rv;
5112 	struct iwk_calibration_cmd cmd;
5113 	struct iwk_rx_gain_diff *gain_diff_p;
5114 
5115 	gain_diff_p = &sc->sc_rxgain_diff;
5116 
5117 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
5118 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
5119 
5120 	for (i = 0; i < RX_CHAINS_NUM; i++) {
5121 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
5122 	}
5123 
5124 	if (iwk_is_associated(sc)) {
5125 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5126 		cmd.diff_gain_a = 0;
5127 		cmd.diff_gain_b = 0;
5128 		cmd.diff_gain_c = 0;
5129 
5130 		/* assume the gains of every Rx chains is balanceable */
5131 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
5132 		    sizeof (cmd), 1);
5133 		if (rv) {
5134 			return (rv);
5135 		}
5136 
5137 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
5138 	}
5139 
5140 	return (IWK_SUCCESS);
5141 }
5142 
5143 /*
5144  * make Receiver gain balance to balance Rx gain between Rx chains
5145  * and determine which chain is disconnected
5146  */
5147 static int iwk_rxgain_diff(iwk_sc_t *sc)
5148 {
5149 	int i, is_24G, rv;
5150 	int max_beacon_chain_n;
5151 	int min_noise_chain_n;
5152 	uint16_t channel_n;
5153 	int32_t beacon_diff;
5154 	int32_t noise_diff;
5155 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
5156 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
5157 	struct iwk_calibration_cmd cmd;
5158 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5159 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5160 	struct statistics_rx_non_phy *rx_general_p =
5161 	    &sc->sc_statistics.rx.general;
5162 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
5163 
5164 	if (INTERFERENCE_DATA_AVAILABLE !=
5165 	    rx_general_p->interference_data_flag) {
5166 		return (IWK_SUCCESS);
5167 	}
5168 
5169 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
5170 		return (IWK_SUCCESS);
5171 	}
5172 
5173 	is_24G = iwk_is_24G_band(sc);
5174 	channel_n = sc->sc_config.chan;	 /* channel number */
5175 
5176 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
5177 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
5178 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
5179 	    !is_24G)) {
5180 		return (IWK_SUCCESS);
5181 	}
5182 
5183 	/* Rx chain's noise strength from statistics notification */
5184 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
5185 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
5186 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
5187 
5188 	/* Rx chain's beacon strength from statistics notification */
5189 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
5190 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
5191 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
5192 
5193 	gain_diff_p->beacon_count++;
5194 
5195 	/* accumulate chain's noise strength */
5196 	gain_diff_p->noise_stren_a += noise_chain_a;
5197 	gain_diff_p->noise_stren_b += noise_chain_b;
5198 	gain_diff_p->noise_stren_c += noise_chain_c;
5199 
5200 	/* accumulate chain's beacon strength */
5201 	gain_diff_p->beacon_stren_a += beacon_chain_a;
5202 	gain_diff_p->beacon_stren_b += beacon_chain_b;
5203 	gain_diff_p->beacon_stren_c += beacon_chain_c;
5204 
5205 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
5206 		/* calculate average beacon strength */
5207 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
5208 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
5209 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
5210 
5211 		/* calculate average noise strength */
5212 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
5213 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5214 		noise_aver[2] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5215 
5216 		/* determine maximum beacon strength among 3 chains */
5217 		if ((beacon_aver[0] >= beacon_aver[1]) &&
5218 		    (beacon_aver[0] >= beacon_aver[2])) {
5219 			max_beacon_chain_n = 0;
5220 			gain_diff_p->connected_chains = 1 << 0;
5221 		} else if (beacon_aver[1] >= beacon_aver[2]) {
5222 			max_beacon_chain_n = 1;
5223 			gain_diff_p->connected_chains = 1 << 1;
5224 		} else {
5225 			max_beacon_chain_n = 2;
5226 			gain_diff_p->connected_chains = 1 << 2;
5227 		}
5228 
5229 		/* determine which chain is disconnected */
5230 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5231 			if (i != max_beacon_chain_n) {
5232 				beacon_diff = beacon_aver[max_beacon_chain_n] -
5233 				    beacon_aver[i];
5234 				if (beacon_diff > MAX_ALLOWED_DIFF) {
5235 					gain_diff_p->disconnect_chain[i] = 1;
5236 				} else {
5237 					gain_diff_p->connected_chains |=
5238 					    (1 << i);
5239 				}
5240 			}
5241 		}
5242 
5243 		/*
5244 		 * if chain A and B are both disconnected,
5245 		 * assume the stronger in beacon strength is connected
5246 		 */
5247 		if (gain_diff_p->disconnect_chain[0] &&
5248 		    gain_diff_p->disconnect_chain[1]) {
5249 			if (beacon_aver[0] >= beacon_aver[1]) {
5250 				gain_diff_p->disconnect_chain[0] = 0;
5251 				gain_diff_p->connected_chains |= (1 << 0);
5252 			} else {
5253 				gain_diff_p->disconnect_chain[1] = 0;
5254 				gain_diff_p->connected_chains |= (1 << 1);
5255 			}
5256 		}
5257 
5258 		/* determine minimum noise strength among 3 chains */
5259 		if (!gain_diff_p->disconnect_chain[0]) {
5260 			min_noise_chain_n = 0;
5261 
5262 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5263 				if (!gain_diff_p->disconnect_chain[i] &&
5264 				    (noise_aver[i] <=
5265 				    noise_aver[min_noise_chain_n])) {
5266 					min_noise_chain_n = i;
5267 				}
5268 
5269 			}
5270 		} else {
5271 			min_noise_chain_n = 1;
5272 
5273 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5274 				if (!gain_diff_p->disconnect_chain[i] &&
5275 				    (noise_aver[i] <=
5276 				    noise_aver[min_noise_chain_n])) {
5277 					min_noise_chain_n = i;
5278 				}
5279 			}
5280 		}
5281 
5282 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
5283 
5284 		/* determine gain difference between chains */
5285 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5286 			if (!gain_diff_p->disconnect_chain[i] &&
5287 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
5288 			    gain_diff_p->gain_diff_chain[i])) {
5289 
5290 				noise_diff = noise_aver[i] -
5291 				    noise_aver[min_noise_chain_n];
5292 				gain_diff_p->gain_diff_chain[i] =
5293 				    (uint8_t)((noise_diff * 10) / 15);
5294 
5295 				if (gain_diff_p->gain_diff_chain[i] > 3) {
5296 					gain_diff_p->gain_diff_chain[i] = 3;
5297 				}
5298 
5299 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
5300 			} else {
5301 				gain_diff_p->gain_diff_chain[i] = 0;
5302 			}
5303 		}
5304 
5305 		if (!gain_diff_p->gain_diff_send) {
5306 			gain_diff_p->gain_diff_send = 1;
5307 
5308 			(void) memset(&cmd, 0, sizeof (cmd));
5309 
5310 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5311 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
5312 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
5313 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
5314 
5315 			/*
5316 			 * send out PHY calibration command to
5317 			 * adjust every chain's Rx gain
5318 			 */
5319 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5320 			    &cmd, sizeof (cmd), 1);
5321 			if (rv) {
5322 				return (rv);
5323 			}
5324 
5325 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
5326 		}
5327 
5328 		gain_diff_p->beacon_stren_a = 0;
5329 		gain_diff_p->beacon_stren_b = 0;
5330 		gain_diff_p->beacon_stren_c = 0;
5331 
5332 		gain_diff_p->noise_stren_a = 0;
5333 		gain_diff_p->noise_stren_b = 0;
5334 		gain_diff_p->noise_stren_c = 0;
5335 	}
5336 
5337 	return (IWK_SUCCESS);
5338 }
5339 
5340 /* Make necessary preparation for Receiver sensitivity calibration */
5341 static int iwk_rx_sens_init(iwk_sc_t *sc)
5342 {
5343 	int i, rv;
5344 	struct iwk_rx_sensitivity_cmd cmd;
5345 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5346 
5347 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5348 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5349 
5350 	rx_sens_p->auto_corr_ofdm_x4 = 90;
5351 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5352 	rx_sens_p->auto_corr_ofdm_x1 = 105;
5353 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5354 
5355 	rx_sens_p->auto_corr_cck_x4 = 125;
5356 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5357 	rx_sens_p->min_energy_det_cck = 100;
5358 
5359 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5360 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5361 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5362 
5363 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5364 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5365 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
5366 	rx_sens_p->last_false_alarm_cnt_cck = 0;
5367 
5368 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5369 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5370 	rx_sens_p->cck_no_false_alarm_num = 0;
5371 	rx_sens_p->cck_beacon_idx = 0;
5372 
5373 	for (i = 0; i < 10; i++) {
5374 		rx_sens_p->cck_beacon_min[i] = 0;
5375 	}
5376 
5377 	rx_sens_p->cck_noise_idx = 0;
5378 	rx_sens_p->cck_noise_ref = 0;
5379 
5380 	for (i = 0; i < 20; i++) {
5381 		rx_sens_p->cck_noise_max[i] = 0;
5382 	}
5383 
5384 	rx_sens_p->cck_noise_diff = 0;
5385 	rx_sens_p->cck_no_false_alarm_num = 0;
5386 
5387 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5388 
5389 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5390 	    rx_sens_p->auto_corr_ofdm_x4;
5391 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5392 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5393 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5394 	    rx_sens_p->auto_corr_ofdm_x1;
5395 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5396 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5397 
5398 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5399 	    rx_sens_p->auto_corr_cck_x4;
5400 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5401 	    rx_sens_p->auto_corr_mrc_cck_x4;
5402 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
5403 
5404 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5405 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5406 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5407 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5408 
5409 	/* at first, set up Rx to maximum sensitivity */
5410 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5411 	if (rv) {
5412 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5413 		    "in the process of initialization, "
5414 		    "failed to send rx sensitivity command\n");
5415 		return (rv);
5416 	}
5417 
5418 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5419 
5420 	return (IWK_SUCCESS);
5421 }
5422 
5423 /*
5424  * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity.
5425  * for more infomation, please refer to iwk_calibration.h file
5426  */
5427 static int iwk_rx_sens(iwk_sc_t *sc)
5428 {
5429 	int rv;
5430 	uint32_t actual_rx_time;
5431 	struct statistics_rx_non_phy *rx_general_p =
5432 	    &sc->sc_statistics.rx.general;
5433 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5434 	struct iwk_rx_sensitivity_cmd cmd;
5435 
5436 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5437 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5438 		    "sensitivity initialization has not finished.\n");
5439 		return (DDI_FAILURE);
5440 	}
5441 
5442 	if (INTERFERENCE_DATA_AVAILABLE !=
5443 	    rx_general_p->interference_data_flag) {
5444 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5445 		    "can't make rx sensitivity calibration,"
5446 		    "because of invalid statistics\n");
5447 		return (DDI_FAILURE);
5448 	}
5449 
5450 	actual_rx_time = rx_general_p->channel_load;
5451 	if (!actual_rx_time) {
5452 		IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5453 		    "can't make rx sensitivity calibration,"
5454 		    "because has not enough rx time\n"));
5455 		return (DDI_FAILURE);
5456 	}
5457 
5458 	/* make Rx sensitivity calibration for OFDM mode */
5459 	rv = iwk_ofdm_sens(sc, actual_rx_time);
5460 	if (rv) {
5461 		return (rv);
5462 	}
5463 
5464 	/* make Rx sensitivity calibration for CCK mode */
5465 	rv = iwk_cck_sens(sc, actual_rx_time);
5466 	if (rv) {
5467 		return (rv);
5468 	}
5469 
5470 	/*
5471 	 * if the sum of false alarm had not changed, nothing will be done
5472 	 */
5473 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5474 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5475 		return (IWK_SUCCESS);
5476 	}
5477 
5478 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5479 
5480 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5481 	    rx_sens_p->auto_corr_ofdm_x4;
5482 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5483 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5484 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5485 	    rx_sens_p->auto_corr_ofdm_x1;
5486 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5487 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5488 
5489 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5490 	    rx_sens_p->auto_corr_cck_x4;
5491 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5492 	    rx_sens_p->auto_corr_mrc_cck_x4;
5493 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5494 	    rx_sens_p->min_energy_det_cck;
5495 
5496 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5497 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5498 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5499 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5500 
5501 	/*
5502 	 * send sensitivity command to complete actual sensitivity calibration
5503 	 */
5504 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5505 	if (rv) {
5506 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5507 		    "fail to send rx sensitivity command\n");
5508 		return (rv);
5509 	}
5510 
5511 	return (IWK_SUCCESS);
5512 
5513 }
5514 
5515 /*
5516  * make Rx sensitivity calibration for CCK mode.
5517  * This is preparing parameters for Sensitivity command
5518  */
5519 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5520 {
5521 	int i;
5522 	uint8_t noise_a, noise_b, noise_c;
5523 	uint8_t max_noise_abc, max_noise_20;
5524 	uint32_t beacon_a, beacon_b, beacon_c;
5525 	uint32_t min_beacon_abc, max_beacon_10;
5526 	uint32_t cck_fa, cck_bp;
5527 	uint32_t cck_sum_fa_bp;
5528 	uint32_t temp;
5529 	struct statistics_rx_non_phy *rx_general_p =
5530 	    &sc->sc_statistics.rx.general;
5531 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5532 
5533 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5534 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5535 
5536 	/* accumulate false alarm */
5537 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5538 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5539 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5540 		cck_fa += (0xFFFFFFFF - temp);
5541 	} else {
5542 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5543 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5544 	}
5545 
5546 	/* accumulate bad plcp */
5547 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5548 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5549 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5550 		cck_bp += (0xFFFFFFFF - temp);
5551 	} else {
5552 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5553 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5554 	}
5555 
5556 	/*
5557 	 * calculate relative value
5558 	 */
5559 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5560 	rx_sens_p->cck_noise_diff = 0;
5561 
5562 	noise_a =
5563 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5564 	noise_b =
5565 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5566 	noise_c =
5567 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5568 
5569 	beacon_a = rx_general_p->beacon_energy_a;
5570 	beacon_b = rx_general_p->beacon_energy_b;
5571 	beacon_c = rx_general_p->beacon_energy_c;
5572 
5573 	/* determine maximum noise among 3 chains */
5574 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5575 		max_noise_abc = noise_a;
5576 	} else if (noise_b >= noise_c) {
5577 		max_noise_abc = noise_b;
5578 	} else {
5579 		max_noise_abc = noise_c;
5580 	}
5581 
5582 	/* record maximum noise among 3 chains */
5583 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5584 	rx_sens_p->cck_noise_idx++;
5585 	if (rx_sens_p->cck_noise_idx >= 20) {
5586 		rx_sens_p->cck_noise_idx = 0;
5587 	}
5588 
5589 	/* determine maximum noise among 20 max noise */
5590 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5591 	for (i = 0; i < 20; i++) {
5592 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5593 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5594 		}
5595 	}
5596 
5597 	/* determine minimum beacon among 3 chains */
5598 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5599 		min_beacon_abc = beacon_a;
5600 	} else if (beacon_b <= beacon_c) {
5601 		min_beacon_abc = beacon_b;
5602 	} else {
5603 		min_beacon_abc = beacon_c;
5604 	}
5605 
5606 	/* record miminum beacon among 3 chains */
5607 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5608 	rx_sens_p->cck_beacon_idx++;
5609 	if (rx_sens_p->cck_beacon_idx >= 10) {
5610 		rx_sens_p->cck_beacon_idx = 0;
5611 	}
5612 
5613 	/* determine maximum beacon among 10 miminum beacon among 3 chains */
5614 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5615 	for (i = 0; i < 10; i++) {
5616 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5617 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5618 		}
5619 	}
5620 
5621 	/* add a little margin */
5622 	max_beacon_10 += 6;
5623 
5624 	/* record the count of having no false alarms */
5625 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5626 		rx_sens_p->cck_no_false_alarm_num++;
5627 	} else {
5628 		rx_sens_p->cck_no_false_alarm_num = 0;
5629 	}
5630 
5631 	/*
5632 	 * adjust parameters in sensitivity command
5633 	 * according to different status.
5634 	 * for more infomation, please refer to iwk_calibration.h file
5635 	 */
5636 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5637 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5638 
5639 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5640 			rx_sens_p->cck_noise_ref = max_noise_20;
5641 
5642 			if (rx_sens_p->min_energy_det_cck > 2) {
5643 				rx_sens_p->min_energy_det_cck -= 2;
5644 			}
5645 		}
5646 
5647 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5648 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5649 		} else {
5650 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5651 				rx_sens_p->auto_corr_cck_x4 += 3;
5652 			} else {
5653 				rx_sens_p->auto_corr_cck_x4 = 200;
5654 			}
5655 		}
5656 
5657 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5658 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5659 		} else {
5660 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5661 		}
5662 
5663 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5664 
5665 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5666 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5667 
5668 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5669 		    (int32_t)max_noise_20;
5670 
5671 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5672 		    ((rx_sens_p->cck_noise_diff > 2) ||
5673 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5674 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5675 				rx_sens_p->min_energy_det_cck += 2;
5676 			} else {
5677 				rx_sens_p->min_energy_det_cck = 97;
5678 			}
5679 
5680 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5681 				rx_sens_p->auto_corr_cck_x4 -= 3;
5682 			} else {
5683 				rx_sens_p->auto_corr_cck_x4 = 125;
5684 			}
5685 
5686 			if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) {
5687 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5688 			} else {
5689 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5690 			}
5691 
5692 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5693 		} else {
5694 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5695 		}
5696 	} else {
5697 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5698 
5699 		rx_sens_p->cck_noise_ref = max_noise_20;
5700 
5701 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5702 			rx_sens_p->min_energy_det_cck -= 8;
5703 		}
5704 
5705 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5706 	}
5707 
5708 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5709 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5710 	}
5711 
5712 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5713 
5714 	return (IWK_SUCCESS);
5715 }
5716 
5717 /*
5718  * make Rx sensitivity calibration for OFDM mode.
5719  * This is preparing parameters for Sensitivity command
5720  */
5721 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5722 {
5723 	uint32_t temp;
5724 	uint16_t temp1;
5725 	uint32_t ofdm_fa, ofdm_bp;
5726 	uint32_t ofdm_sum_fa_bp;
5727 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5728 
5729 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5730 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5731 
5732 	/* accumulate false alarm */
5733 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5734 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5735 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5736 		ofdm_fa += (0xFFFFFFFF - temp);
5737 	} else {
5738 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5739 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5740 	}
5741 
5742 	/* accumulate bad plcp */
5743 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5744 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5745 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5746 		ofdm_bp += (0xFFFFFFFF - temp);
5747 	} else {
5748 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5749 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5750 	}
5751 
5752 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5753 
5754 	/*
5755 	 * adjust parameter in sensitivity command according to different status
5756 	 */
5757 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5758 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5759 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5760 
5761 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5762 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5763 		    (temp1 <= 210) ? temp1 : 210;
5764 
5765 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5766 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5767 
5768 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5769 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5770 		    (temp1 <= 270) ? temp1 : 270;
5771 
5772 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5773 
5774 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5775 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5776 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5777 
5778 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5779 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5780 		    (temp1 >= 170) ? temp1 : 170;
5781 
5782 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5783 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5784 
5785 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5786 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5787 		    (temp1 >= 220) ? temp1 : 220;
5788 
5789 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5790 
5791 	} else {
5792 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5793 	}
5794 
5795 	return (IWK_SUCCESS);
5796 }
5797 
5798 /*
5799  * additional process to management frames
5800  */
5801 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
5802     struct ieee80211_node *in,
5803     int subtype, int rssi, uint32_t rstamp)
5804 {
5805 	iwk_sc_t *sc = (iwk_sc_t *)ic;
5806 	struct ieee80211_frame *wh;
5807 	uint8_t index1, index2;
5808 	int err;
5809 
5810 	sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp);
5811 
5812 	mutex_enter(&sc->sc_glock);
5813 	switch (subtype) {
5814 	case IEEE80211_FC0_SUBTYPE_BEACON:
5815 		if (sc->sc_ibss.ibss_beacon.syncbeacon && in == ic->ic_bss &&
5816 		    ic->ic_state == IEEE80211_S_RUN) {
5817 			if (ieee80211_beacon_update(ic, in,
5818 			    &sc->sc_ibss.ibss_beacon.iwk_boff,
5819 			    sc->sc_ibss.ibss_beacon.mp, 0)) {
5820 				bcopy(sc->sc_ibss.ibss_beacon.mp->b_rptr,
5821 				    sc->sc_ibss.ibss_beacon.beacon_cmd.
5822 				    bcon_frame,
5823 				    MBLKL(sc->sc_ibss.ibss_beacon.mp));
5824 			}
5825 			err = iwk_cmd(sc, REPLY_TX_BEACON,
5826 			    &sc->sc_ibss.ibss_beacon.beacon_cmd,
5827 			    sc->sc_ibss.ibss_beacon.beacon_cmd_len, 1);
5828 			if (err != IWK_SUCCESS) {
5829 				cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5830 				    "failed to TX beacon.\n");
5831 			}
5832 			sc->sc_ibss.ibss_beacon.syncbeacon = 0;
5833 		}
5834 		if (ic->ic_opmode == IEEE80211_M_IBSS &&
5835 		    ic->ic_state == IEEE80211_S_RUN) {
5836 			wh = (struct ieee80211_frame *)mp->b_rptr;
5837 			mutex_enter(&sc->sc_ibss.node_tb_lock);
5838 			/*
5839 			 * search for node in ibss node table
5840 			 */
5841 			for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
5842 			    index1++) {
5843 				if (sc->sc_ibss.ibss_node_tb[index1].used &&
5844 				    IEEE80211_ADDR_EQ(sc->sc_ibss.
5845 				    ibss_node_tb[index1].node.bssid,
5846 				    wh->i_addr2)) {
5847 					break;
5848 				}
5849 			}
5850 			/*
5851 			 * if don't find in ibss node table
5852 			 */
5853 			if (index1 >= IWK_BROADCAST_ID) {
5854 				err = iwk_clean_add_node_ibss(ic,
5855 				    wh->i_addr2, &index2);
5856 				if (err != IWK_SUCCESS) {
5857 					cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5858 					    "failed to clean all nodes "
5859 					    "and add one node\n");
5860 				}
5861 			}
5862 			mutex_exit(&sc->sc_ibss.node_tb_lock);
5863 		}
5864 		break;
5865 	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
5866 		break;
5867 	}
5868 	mutex_exit(&sc->sc_glock);
5869 }
5870 
5871 /*
5872  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5873  *     a 256-entry history of uCode execution within a circular buffer.
5874  *     Its header format is:
5875  *
5876  *	uint32_t log_size;	log capacity (in number of entries)
5877  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5878  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5879  *      uint32_t write_index;	next circular buffer entry that uCode would fill
5880  *
5881  *     The header is followed by the circular buffer of log entries.  Entries
5882  *     with timestamps have the following format:
5883  *
5884  *	uint32_t event_id;     range 0 - 1500
5885  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5886  *	uint32_t data;         event_id-specific data value
5887  *
5888  *     Entries without timestamps contain only event_id and data.
5889  */
5890 
5891 /*
5892  * iwk_write_event_log - Write event log to dmesg
5893  */
5894 static void iwk_write_event_log(iwk_sc_t *sc)
5895 {
5896 	uint32_t log_event_table_ptr;	/* Start address of event table */
5897 	uint32_t startptr;	/* Start address of log data */
5898 	uint32_t logptr;	/* address of log data entry */
5899 	uint32_t i, n, num_events;
5900 	uint32_t event_id, data1, data2; /* log data */
5901 
5902 	uint32_t log_size;   /* log capacity (in number of entries) */
5903 	uint32_t type;	/* (1)timestamp with each entry,(0) no timestamp */
5904 	uint32_t wraps;	/* # times uCode has wrapped to */
5905 			/* the top of circular buffer */
5906 	uint32_t idx; /* index of entry to be filled in next */
5907 
5908 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5909 	if (!(log_event_table_ptr)) {
5910 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5911 		return;
5912 	}
5913 
5914 	iwk_mac_access_enter(sc);
5915 
5916 	/* Read log header */
5917 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5918 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5919 	type = iwk_mem_read(sc, log_event_table_ptr);
5920 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5921 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5922 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5923 	idx = iwk_mem_read(sc, log_event_table_ptr);
5924 	startptr = log_event_table_ptr +
5925 	    sizeof (uint32_t); /* addr of start of log data */
5926 	if (!log_size & !wraps) {
5927 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5928 		iwk_mac_access_exit(sc);
5929 		return;
5930 	}
5931 
5932 	if (!wraps) {
5933 		num_events = idx;
5934 		logptr = startptr;
5935 	} else {
5936 		num_events = log_size - idx;
5937 		n = type ? 2 : 3;
5938 		logptr = startptr + (idx * n * sizeof (uint32_t));
5939 	}
5940 
5941 	for (i = 0; i < num_events; i++) {
5942 		event_id = iwk_mem_read(sc, logptr);
5943 		logptr += sizeof (uint32_t);
5944 		data1 = iwk_mem_read(sc, logptr);
5945 		logptr += sizeof (uint32_t);
5946 		if (type == 0) { /* no timestamp */
5947 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=%x0x",
5948 			    event_id, data1));
5949 		} else { /* timestamp */
5950 			data2 = iwk_mem_read(sc, logptr);
5951 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5952 			    data1, event_id, data2);
5953 			IWK_DBG((IWK_DEBUG_EEPROM,
5954 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5955 			    data1, event_id, data2));
5956 			logptr += sizeof (uint32_t);
5957 		}
5958 	}
5959 
5960 	/*
5961 	 * Print the wrapped around entries, if any
5962 	 */
5963 	if (wraps) {
5964 		logptr = startptr;
5965 		for (i = 0; i < idx; i++) {
5966 			event_id = iwk_mem_read(sc, logptr);
5967 			logptr += sizeof (uint32_t);
5968 			data1 = iwk_mem_read(sc, logptr);
5969 			logptr += sizeof (uint32_t);
5970 			if (type == 0) { /* no timestamp */
5971 				IWK_DBG((IWK_DEBUG_EEPROM,
5972 				    "Event ID=%d, Data=%x0x", event_id, data1));
5973 			} else { /* timestamp */
5974 				data2 = iwk_mem_read(sc, logptr);
5975 				IWK_DBG((IWK_DEBUG_EEPROM,
5976 				    "Time = %d, Event ID=%d, Data=0x%x\n",
5977 				    data1, event_id, data2));
5978 				logptr += sizeof (uint32_t);
5979 			}
5980 		}
5981 	}
5982 
5983 	iwk_mac_access_exit(sc);
5984 }
5985 
5986 /*
5987  * error_event_table_ptr indicates base of the error log.  This contains
5988  * information about any uCode error that occurs.  For 4965, the format is:
5989  *
5990  * uint32_t valid;        (nonzero) valid, (0) log is empty
5991  * uint32_t error_id;     type of error
5992  * uint32_t pc;           program counter
5993  * uint32_t blink1;       branch link
5994  * uint32_t blink2;       branch link
5995  * uint32_t ilink1;       interrupt link
5996  * uint32_t ilink2;       interrupt link
5997  * uint32_t data1;        error-specific data
5998  * uint32_t data2;        error-specific data
5999  * uint32_t line;         source code line of error
6000  * uint32_t bcon_time;    beacon timer
6001  * uint32_t tsf_low;      network timestamp function timer
6002  * uint32_t tsf_hi;       network timestamp function timer
6003  */
6004 /*
6005  * iwk_write_error_log - Write error log to dmesg
6006  */
6007 static void iwk_write_error_log(iwk_sc_t *sc)
6008 {
6009 	uint32_t err_ptr;	/* Start address of error log */
6010 	uint32_t valid;		/* is error log valid */
6011 
6012 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
6013 	if (!(err_ptr)) {
6014 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
6015 		return;
6016 	}
6017 
6018 	iwk_mac_access_enter(sc);
6019 
6020 	valid = iwk_mem_read(sc, err_ptr);
6021 	if (!(valid)) {
6022 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
6023 		iwk_mac_access_exit(sc);
6024 		return;
6025 	}
6026 	err_ptr += sizeof (uint32_t);
6027 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
6028 	err_ptr += sizeof (uint32_t);
6029 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
6030 	err_ptr += sizeof (uint32_t);
6031 	IWK_DBG((IWK_DEBUG_EEPROM,
6032 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6033 	err_ptr += sizeof (uint32_t);
6034 	IWK_DBG((IWK_DEBUG_EEPROM,
6035 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6036 	err_ptr += sizeof (uint32_t);
6037 	IWK_DBG((IWK_DEBUG_EEPROM,
6038 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6039 	err_ptr += sizeof (uint32_t);
6040 	IWK_DBG((IWK_DEBUG_EEPROM,
6041 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6042 	err_ptr += sizeof (uint32_t);
6043 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
6044 	err_ptr += sizeof (uint32_t);
6045 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
6046 	err_ptr += sizeof (uint32_t);
6047 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
6048 	err_ptr += sizeof (uint32_t);
6049 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
6050 	err_ptr += sizeof (uint32_t);
6051 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
6052 	err_ptr += sizeof (uint32_t);
6053 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
6054 
6055 	iwk_mac_access_exit(sc);
6056 }
6057 
6058 static int
6059 iwk_run_state_config_ibss(ieee80211com_t *ic)
6060 {
6061 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6062 	ieee80211_node_t *in = ic->ic_bss;
6063 	int i, err = IWK_SUCCESS;
6064 
6065 	mutex_enter(&sc->sc_ibss.node_tb_lock);
6066 
6067 	/*
6068 	 * clean all nodes in ibss node table assure be
6069 	 * consistent with hardware
6070 	 */
6071 	for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6072 		sc->sc_ibss.ibss_node_tb[i].used = 0;
6073 		(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6074 		    0,
6075 		    sizeof (iwk_add_sta_t));
6076 	}
6077 
6078 	sc->sc_ibss.node_number = 0;
6079 
6080 	mutex_exit(&sc->sc_ibss.node_tb_lock);
6081 
6082 	/*
6083 	 * configure RX and TX
6084 	 */
6085 	sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
6086 
6087 	sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6088 	sc->sc_config.filter_flags =
6089 	    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
6090 	    RXON_FILTER_DIS_DECRYPT_MSK |
6091 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
6092 
6093 	sc->sc_config.assoc_id = 0;
6094 
6095 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
6096 	sc->sc_config.chan = ieee80211_chan2ieee(ic,
6097 	    in->in_chan);
6098 
6099 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
6100 		sc->sc_config.cck_basic_rates = 0x03;
6101 		sc->sc_config.ofdm_basic_rates = 0;
6102 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
6103 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
6104 		sc->sc_config.cck_basic_rates = 0;
6105 		sc->sc_config.ofdm_basic_rates = 0x15;
6106 
6107 	} else {
6108 		sc->sc_config.cck_basic_rates = 0x0f;
6109 		sc->sc_config.ofdm_basic_rates = 0xff;
6110 	}
6111 
6112 	sc->sc_config.flags &=
6113 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6114 	    RXON_FLG_SHORT_SLOT_MSK);
6115 
6116 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
6117 		sc->sc_config.flags |=
6118 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6119 	}
6120 
6121 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
6122 		sc->sc_config.flags |=
6123 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6124 	}
6125 
6126 	sc->sc_config.filter_flags |=
6127 	    LE_32(RXON_FILTER_ASSOC_MSK);
6128 
6129 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6130 	    sizeof (iwk_rxon_cmd_t), 1);
6131 	if (err != IWK_SUCCESS) {
6132 		cmn_err(CE_WARN, "iwk_run_state_config_ibss(): "
6133 		    "failed to update configuration.\n");
6134 		return (err);
6135 	}
6136 
6137 	return (err);
6138 
6139 }
6140 
6141 static int
6142 iwk_run_state_config_sta(ieee80211com_t *ic)
6143 {
6144 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6145 	ieee80211_node_t *in = ic->ic_bss;
6146 	int err = IWK_SUCCESS;
6147 
6148 	/* update adapter's configuration */
6149 	if (sc->sc_assoc_id != in->in_associd) {
6150 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6151 		    "associate ID mismatch: expected %d, "
6152 		    "got %d\n",
6153 		    in->in_associd, sc->sc_assoc_id);
6154 	}
6155 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
6156 
6157 	/*
6158 	 * short preamble/slot time are
6159 	 * negotiated when associating
6160 	 */
6161 	sc->sc_config.flags &=
6162 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6163 	    RXON_FLG_SHORT_SLOT_MSK);
6164 
6165 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
6166 		sc->sc_config.flags |=
6167 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6168 
6169 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6170 		sc->sc_config.flags |=
6171 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6172 
6173 	sc->sc_config.filter_flags |=
6174 	    LE_32(RXON_FILTER_ASSOC_MSK);
6175 
6176 	if (ic->ic_opmode != IEEE80211_M_STA)
6177 		sc->sc_config.filter_flags |=
6178 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
6179 
6180 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
6181 	    " filter_flags %x\n",
6182 	    sc->sc_config.chan, sc->sc_config.flags,
6183 	    sc->sc_config.filter_flags));
6184 
6185 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6186 	    sizeof (iwk_rxon_cmd_t), 1);
6187 	if (err != IWK_SUCCESS) {
6188 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6189 		    "failed to update configuration\n");
6190 		return (err);
6191 	}
6192 
6193 	return (err);
6194 }
6195 
6196 static int
6197 iwk_fast_recover(iwk_sc_t *sc)
6198 {
6199 	ieee80211com_t *ic = &sc->sc_ic;
6200 	int err;
6201 
6202 	mutex_enter(&sc->sc_glock);
6203 
6204 	/* restore runtime configuration */
6205 	bcopy(&sc->sc_config_save, &sc->sc_config,
6206 	    sizeof (sc->sc_config));
6207 
6208 	/* reset state to handle reassociations correctly */
6209 	sc->sc_config.assoc_id = 0;
6210 	sc->sc_config.filter_flags &=
6211 	    ~LE_32(RXON_FILTER_ASSOC_MSK);
6212 
6213 	if ((err = iwk_hw_set_before_auth(sc)) != 0) {
6214 		cmn_err(CE_WARN, "iwk_fast_recover(): "
6215 		    "failed to setup authentication\n");
6216 		mutex_exit(&sc->sc_glock);
6217 		return (err);
6218 	}
6219 
6220 	bcopy(&sc->sc_config_save, &sc->sc_config,
6221 	    sizeof (sc->sc_config));
6222 
6223 	/* update adapter's configuration */
6224 	err = iwk_run_state_config_sta(ic);
6225 	if (err != IWK_SUCCESS) {
6226 		cmn_err(CE_WARN, "iwk_fast_recover(): "
6227 		    "failed to setup association\n");
6228 		mutex_exit(&sc->sc_glock);
6229 		return (err);
6230 	}
6231 
6232 	/* obtain current temperature of chipset */
6233 	sc->sc_tempera = iwk_curr_tempera(sc);
6234 
6235 	/*
6236 	 * make Tx power calibration to determine
6237 	 * the gains of DSP and radio
6238 	 */
6239 	err = iwk_tx_power_calibration(sc);
6240 	if (err) {
6241 		cmn_err(CE_WARN, "iwk_fast_recover(): "
6242 		    "failed to set tx power table\n");
6243 		mutex_exit(&sc->sc_glock);
6244 		return (err);
6245 	}
6246 
6247 	/*
6248 	 * make initialization for Receiver
6249 	 * sensitivity calibration
6250 	 */
6251 	err = iwk_rx_sens_init(sc);
6252 	if (err) {
6253 		cmn_err(CE_WARN, "iwk_fast_recover(): "
6254 		    "failed to init RX sensitivity\n");
6255 		mutex_exit(&sc->sc_glock);
6256 		return (err);
6257 	}
6258 
6259 	/* make initialization for Receiver gain balance */
6260 	err = iwk_rxgain_diff_init(sc);
6261 	if (err) {
6262 		cmn_err(CE_WARN, "iwk_fast_recover(): "
6263 		    "failed to init phy calibration\n");
6264 		mutex_exit(&sc->sc_glock);
6265 		return (err);
6266 
6267 	}
6268 	/* set LED on */
6269 	iwk_set_led(sc, 2, 0, 1);
6270 
6271 	mutex_exit(&sc->sc_glock);
6272 
6273 	/* update keys */
6274 	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
6275 		for (int i = 0; i < IEEE80211_KEY_MAX; i++) {
6276 			if (ic->ic_nw_keys[i].wk_keyix == IEEE80211_KEYIX_NONE)
6277 				continue;
6278 			err = iwk_key_set(ic, &ic->ic_nw_keys[i],
6279 			    ic->ic_bss->in_macaddr);
6280 			/* failure */
6281 			if (err == 0) {
6282 				cmn_err(CE_WARN, "iwk_fast_recover(): "
6283 				    "failed to setup hardware keys\n");
6284 				return (IWK_FAIL);
6285 			}
6286 		}
6287 	}
6288 
6289 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
6290 
6291 	/* start queue */
6292 	IWK_DBG((IWK_DEBUG_FW, "iwk_fast_recover(): resume xmit\n"));
6293 	mac_tx_update(ic->ic_mach);
6294 
6295 
6296 	return (IWK_SUCCESS);
6297 }
6298 
6299 static int
6300 iwk_start_tx_beacon(ieee80211com_t *ic)
6301 {
6302 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6303 	ieee80211_node_t *in = ic->ic_bss;
6304 	int err = IWK_SUCCESS;
6305 	iwk_tx_beacon_cmd_t  *tx_beacon_p;
6306 	uint16_t  masks = 0;
6307 	mblk_t *mp;
6308 	int rate;
6309 
6310 	/*
6311 	 * allocate and transmit beacon frames
6312 	 */
6313 	tx_beacon_p = &sc->sc_ibss.ibss_beacon.beacon_cmd;
6314 
6315 	(void) memset(tx_beacon_p, 0,
6316 	    sizeof (iwk_tx_beacon_cmd_t));
6317 	rate = 0;
6318 	masks = 0;
6319 
6320 	tx_beacon_p->config.sta_id = IWK_BROADCAST_ID;
6321 	tx_beacon_p->config.stop_time.life_time =
6322 	    LE_32(0xffffffff);
6323 
6324 	if (sc->sc_ibss.ibss_beacon.mp != NULL) {
6325 		freemsg(sc->sc_ibss.ibss_beacon.mp);
6326 		sc->sc_ibss.ibss_beacon.mp = NULL;
6327 	}
6328 
6329 	sc->sc_ibss.ibss_beacon.mp =
6330 	    ieee80211_beacon_alloc(ic, in,
6331 	    &sc->sc_ibss.ibss_beacon.iwk_boff);
6332 	if (sc->sc_ibss.ibss_beacon.mp == NULL) {
6333 		cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6334 		    "failed to get beacon frame.\n");
6335 		return (IWK_FAIL);
6336 	}
6337 
6338 	mp = sc->sc_ibss.ibss_beacon.mp;
6339 
6340 	ASSERT(mp->b_cont == NULL);
6341 
6342 	bcopy(mp->b_rptr, tx_beacon_p->bcon_frame, MBLKL(mp));
6343 
6344 	tx_beacon_p->config.len = (uint16_t)(MBLKL(mp));
6345 	sc->sc_ibss.ibss_beacon.beacon_cmd_len =
6346 	    sizeof (iwk_tx_cmd_t) +
6347 	    4 + tx_beacon_p->config.len;
6348 
6349 	/*
6350 	 * beacons are sent at 1M
6351 	 */
6352 	rate = in->in_rates.ir_rates[0];
6353 	rate &= IEEE80211_RATE_VAL;
6354 
6355 	if (2 == rate || 4 == rate || 11 == rate ||
6356 	    22 == rate) {
6357 		masks |= RATE_MCS_CCK_MSK;
6358 	}
6359 
6360 	masks |= RATE_MCS_ANT_B_MSK;
6361 
6362 	tx_beacon_p->config.rate.r.rate_n_flags =
6363 	    (iwk_rate_to_plcp(rate) | masks);
6364 
6365 
6366 	tx_beacon_p->config.tx_flags =
6367 	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
6368 
6369 	if (ic->ic_bss->in_tstamp.tsf != 0) {
6370 		sc->sc_ibss.ibss_beacon.syncbeacon = 1;
6371 	} else {
6372 		if (ieee80211_beacon_update(ic, in,
6373 		    &sc->sc_ibss.ibss_beacon.iwk_boff,
6374 		    mp, 0)) {
6375 			bcopy(mp->b_rptr,
6376 			    tx_beacon_p->bcon_frame,
6377 			    MBLKL(mp));
6378 		}
6379 
6380 		err = iwk_cmd(sc, REPLY_TX_BEACON,
6381 		    tx_beacon_p,
6382 		    sc->sc_ibss.ibss_beacon.beacon_cmd_len,
6383 		    1);
6384 		if (err != IWK_SUCCESS) {
6385 			cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6386 			    "failed to TX beacon.\n");
6387 			return (err);
6388 		}
6389 
6390 		sc->sc_ibss.ibss_beacon.syncbeacon = 0;
6391 	}
6392 
6393 	return (err);
6394 }
6395 
6396 static int
6397 iwk_clean_add_node_ibss(struct ieee80211com *ic,
6398     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2)
6399 {
6400 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6401 	uint8_t	index;
6402 	iwk_add_sta_t bc_node;
6403 	iwk_link_quality_cmd_t bc_link_quality;
6404 	iwk_link_quality_cmd_t link_quality;
6405 	uint16_t  bc_masks = 0;
6406 	uint16_t  masks = 0;
6407 	int i, rate;
6408 	struct ieee80211_rateset rs;
6409 	iwk_ibss_node_t *ibss_node_p;
6410 	int err = IWK_SUCCESS;
6411 
6412 	/*
6413 	 * find a location that is not
6414 	 * used in ibss node table
6415 	 */
6416 	for (index = IWK_STA_ID;
6417 	    index < IWK_STATION_COUNT; index++) {
6418 		if (!sc->sc_ibss.ibss_node_tb[index].used) {
6419 			break;
6420 		}
6421 	}
6422 
6423 	/*
6424 	 * if have too many nodes in hardware, clean up
6425 	 */
6426 	if (index < IWK_BROADCAST_ID &&
6427 	    sc->sc_ibss.node_number >= 25) {
6428 		if (iwk_cmd(sc, REPLY_REMOVE_ALL_STA,
6429 		    NULL, 0, 1) != IWK_SUCCESS) {
6430 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6431 			    "failed to remove all nodes in hardware\n");
6432 			return (IWK_FAIL);
6433 		}
6434 
6435 		for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6436 			sc->sc_ibss.ibss_node_tb[i].used = 0;
6437 			(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6438 			    0, sizeof (iwk_add_sta_t));
6439 		}
6440 
6441 		sc->sc_ibss.node_number = 0;
6442 
6443 		/*
6444 		 * add broadcast node so that we
6445 		 * can send broadcast frame
6446 		 */
6447 		(void) memset(&bc_node, 0, sizeof (bc_node));
6448 		(void) memset(bc_node.bssid, 0xff, 6);
6449 		bc_node.id = IWK_BROADCAST_ID;
6450 
6451 		err = iwk_cmd(sc, REPLY_ADD_STA, &bc_node, sizeof (bc_node), 1);
6452 		if (err != IWK_SUCCESS) {
6453 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6454 		    "failed to add broadcast node\n");
6455 		return (err);
6456 		}
6457 
6458 		/* TX_LINK_QUALITY cmd */
6459 		(void) memset(&bc_link_quality, 0, sizeof (bc_link_quality));
6460 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6461 			bc_masks |= RATE_MCS_CCK_MSK;
6462 			bc_masks |= RATE_MCS_ANT_B_MSK;
6463 			bc_masks &= ~RATE_MCS_ANT_A_MSK;
6464 			bc_link_quality.rate_n_flags[i] =
6465 			    iwk_rate_to_plcp(2) | bc_masks;
6466 		}
6467 
6468 		bc_link_quality.general_params.single_stream_ant_msk = 2;
6469 		bc_link_quality.general_params.dual_stream_ant_msk = 3;
6470 		bc_link_quality.agg_params.agg_dis_start_th = 3;
6471 		bc_link_quality.agg_params.agg_time_limit = LE_16(4000);
6472 		bc_link_quality.sta_id = IWK_BROADCAST_ID;
6473 
6474 		err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6475 		    &bc_link_quality, sizeof (bc_link_quality), 1);
6476 		if (err != IWK_SUCCESS) {
6477 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6478 			    "failed to config link quality table\n");
6479 			return (err);
6480 		}
6481 	}
6482 
6483 	if (index >= IWK_BROADCAST_ID) {
6484 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6485 		    "the count of node in hardware is too much\n");
6486 		return (IWK_FAIL);
6487 	}
6488 
6489 	/*
6490 	 * add a node into hardware
6491 	 */
6492 	ibss_node_p = &sc->sc_ibss.ibss_node_tb[index];
6493 
6494 	ibss_node_p->used = 1;
6495 
6496 	(void) memset(&ibss_node_p->node, 0,
6497 	    sizeof (iwk_add_sta_t));
6498 
6499 	IEEE80211_ADDR_COPY(ibss_node_p->node.bssid, addr);
6500 	ibss_node_p->node.id = index;
6501 	ibss_node_p->node.control = 0;
6502 	ibss_node_p->node.flags = 0;
6503 
6504 	err = iwk_cmd(sc, REPLY_ADD_STA, &ibss_node_p->node,
6505 	    sizeof (iwk_add_sta_t), 1);
6506 	if (err != IWK_SUCCESS) {
6507 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6508 		    "failed to add IBSS node\n");
6509 		ibss_node_p->used = 0;
6510 		(void) memset(&ibss_node_p->node, 0,
6511 		    sizeof (iwk_add_sta_t));
6512 		return (err);
6513 	}
6514 
6515 	sc->sc_ibss.node_number++;
6516 
6517 	(void) memset(&link_quality, 0, sizeof (link_quality));
6518 
6519 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic,
6520 	    ic->ic_curchan)];
6521 
6522 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6523 		if (i < rs.ir_nrates) {
6524 			rate = rs.
6525 			    ir_rates[rs.ir_nrates - i];
6526 		} else {
6527 			rate = 2;
6528 		}
6529 
6530 		if (2 == rate || 4 == rate ||
6531 		    11 == rate || 22 == rate) {
6532 			masks |= RATE_MCS_CCK_MSK;
6533 		}
6534 
6535 		masks |= RATE_MCS_ANT_B_MSK;
6536 		masks &= ~RATE_MCS_ANT_A_MSK;
6537 
6538 		link_quality.rate_n_flags[i] =
6539 		    iwk_rate_to_plcp(rate) | masks;
6540 	}
6541 
6542 	link_quality.general_params.single_stream_ant_msk = 2;
6543 	link_quality.general_params.dual_stream_ant_msk = 3;
6544 	link_quality.agg_params.agg_dis_start_th = 3;
6545 	link_quality.agg_params.agg_time_limit = LE_16(4000);
6546 	link_quality.sta_id = ibss_node_p->node.id;
6547 
6548 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6549 	    &link_quality, sizeof (link_quality), 1);
6550 	if (err != IWK_SUCCESS) {
6551 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6552 		    "failed to set up TX link quality\n");
6553 		ibss_node_p->used = 0;
6554 		(void) memset(ibss_node_p->node.bssid, 0, 6);
6555 		return (err);
6556 	}
6557 
6558 	*index2 = index;
6559 
6560 	return (err);
6561 }
6562