xref: /titanic_44/usr/src/uts/common/io/iwk/iwk2.c (revision a1e9eea083a8f257157edb8a1efb5bbd300eb4bf)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
88 
89 static void	*iwk_soft_state_p = NULL;
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * the maximum number of segments is 4 for the hardware.
177  * now all the wifi drivers put the whole frame in a single
178  * descriptor, so we define the maximum  number of segments 1,
179  * just the same as the rx_buffer. we consider leverage the HW
180  * ability in the future, that is why we don't define rx and tx
181  * buffer_dma_attr as the same.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
197 
198 /* DMA attributes for text and data part in the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 static void	iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
312     struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp);
313 
314 static void	iwk_write_event_log(iwk_sc_t *);
315 static void	iwk_write_error_log(iwk_sc_t *);
316 
317 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
318 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
319 static int	iwk_quiesce(dev_info_t *dip);
320 
321 /*
322  * GLD specific operations
323  */
324 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
325 static int	iwk_m_start(void *arg);
326 static void	iwk_m_stop(void *arg);
327 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
328 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
329 static int	iwk_m_promisc(void *arg, boolean_t on);
330 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
331 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
332 static int	iwk_m_setprop(void *arg, const char *pr_name,
333 	mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
334 static int	iwk_m_getprop(void *arg, const char *pr_name,
335 	mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
336 	void *wldp_buf, uint_t *perm);
337 static void	iwk_destroy_locks(iwk_sc_t *sc);
338 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
339 static void	iwk_thread(iwk_sc_t *sc);
340 static int	iwk_run_state_config_ibss(ieee80211com_t *ic);
341 static int	iwk_run_state_config_sta(ieee80211com_t *ic);
342 static int	iwk_start_tx_beacon(ieee80211com_t *ic);
343 static int	iwk_clean_add_node_ibss(struct ieee80211com *ic,
344     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2);
345 
346 /*
347  * Supported rates for 802.11b/g modes (in 500Kbps unit).
348  * 11a and 11n support will be added later.
349  */
350 static const struct ieee80211_rateset iwk_rateset_11b =
351 	{ 4, { 2, 4, 11, 22 } };
352 
353 static const struct ieee80211_rateset iwk_rateset_11g =
354 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
355 
356 /*
357  * For mfthread only
358  */
359 extern pri_t minclsyspri;
360 
361 #define	DRV_NAME_4965	"iwk"
362 
363 /*
364  * Module Loading Data & Entry Points
365  */
366 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
367     iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
368 
369 static struct modldrv iwk_modldrv = {
370 	&mod_driverops,
371 	"Intel(R) 4965AGN driver(N)",
372 	&iwk_devops
373 };
374 
375 static struct modlinkage iwk_modlinkage = {
376 	MODREV_1,
377 	&iwk_modldrv,
378 	NULL
379 };
380 
381 int
382 _init(void)
383 {
384 	int	status;
385 
386 	status = ddi_soft_state_init(&iwk_soft_state_p,
387 	    sizeof (iwk_sc_t), 1);
388 	if (status != DDI_SUCCESS)
389 		return (status);
390 
391 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
392 	status = mod_install(&iwk_modlinkage);
393 	if (status != DDI_SUCCESS) {
394 		mac_fini_ops(&iwk_devops);
395 		ddi_soft_state_fini(&iwk_soft_state_p);
396 	}
397 
398 	return (status);
399 }
400 
401 int
402 _fini(void)
403 {
404 	int status;
405 
406 	status = mod_remove(&iwk_modlinkage);
407 	if (status == DDI_SUCCESS) {
408 		mac_fini_ops(&iwk_devops);
409 		ddi_soft_state_fini(&iwk_soft_state_p);
410 	}
411 
412 	return (status);
413 }
414 
415 int
416 _info(struct modinfo *mip)
417 {
418 	return (mod_info(&iwk_modlinkage, mip));
419 }
420 
421 /*
422  * Mac Call Back entries
423  */
424 mac_callbacks_t	iwk_m_callbacks = {
425 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
426 	iwk_m_stat,
427 	iwk_m_start,
428 	iwk_m_stop,
429 	iwk_m_promisc,
430 	iwk_m_multicst,
431 	iwk_m_unicst,
432 	iwk_m_tx,
433 	iwk_m_ioctl,
434 	NULL,
435 	NULL,
436 	NULL,
437 	iwk_m_setprop,
438 	iwk_m_getprop
439 };
440 
441 #ifdef DEBUG
442 void
443 iwk_dbg(uint32_t flags, const char *fmt, ...)
444 {
445 	va_list	ap;
446 
447 	if (flags & iwk_dbg_flags) {
448 		va_start(ap, fmt);
449 		vcmn_err(CE_NOTE, fmt, ap);
450 		va_end(ap);
451 	}
452 }
453 #endif
454 
455 /*
456  * device operations
457  */
458 int
459 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
460 {
461 	iwk_sc_t		*sc;
462 	ieee80211com_t	*ic;
463 	int			instance, err, i;
464 	char			strbuf[32];
465 	wifi_data_t		wd = { 0 };
466 	mac_register_t		*macp;
467 
468 	int			intr_type;
469 	int			intr_count;
470 	int			intr_actual;
471 
472 	switch (cmd) {
473 	case DDI_ATTACH:
474 		break;
475 	case DDI_RESUME:
476 		sc = ddi_get_soft_state(iwk_soft_state_p,
477 		    ddi_get_instance(dip));
478 		ASSERT(sc != NULL);
479 		if (sc->sc_flags & IWK_F_RUNNING)
480 			(void) iwk_init(sc);
481 
482 		mutex_enter(&sc->sc_glock);
483 		sc->sc_flags &= ~IWK_F_SUSPEND;
484 		sc->sc_flags |= IWK_F_LAZY_RESUME;
485 		mutex_exit(&sc->sc_glock);
486 
487 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
488 		return (DDI_SUCCESS);
489 	default:
490 		err = DDI_FAILURE;
491 		goto attach_fail1;
492 	}
493 
494 	instance = ddi_get_instance(dip);
495 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
496 	if (err != DDI_SUCCESS) {
497 		cmn_err(CE_WARN,
498 		    "iwk_attach(): failed to allocate soft state\n");
499 		goto attach_fail1;
500 	}
501 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
502 	sc->sc_dip = dip;
503 
504 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
505 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
506 	if (err != DDI_SUCCESS) {
507 		cmn_err(CE_WARN,
508 		    "iwk_attach(): failed to map config spaces regs\n");
509 		goto attach_fail2;
510 	}
511 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
512 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
513 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
514 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
515 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
516 	if (!sc->sc_clsz)
517 		sc->sc_clsz = 16;
518 	sc->sc_clsz = (sc->sc_clsz << 2);
519 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
520 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
521 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
522 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
523 	/*
524 	 * Map operating registers
525 	 */
526 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
527 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
528 	if (err != DDI_SUCCESS) {
529 		cmn_err(CE_WARN,
530 		    "iwk_attach(): failed to map device regs\n");
531 		goto attach_fail2a;
532 	}
533 
534 	err = ddi_intr_get_supported_types(dip, &intr_type);
535 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
536 		cmn_err(CE_WARN, "iwk_attach(): "
537 		    "Fixed type interrupt is not supported\n");
538 		goto attach_fail_intr_a;
539 	}
540 
541 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
542 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
543 		cmn_err(CE_WARN, "iwk_attach(): "
544 		    "No fixed interrupts\n");
545 		goto attach_fail_intr_a;
546 	}
547 
548 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
549 
550 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
551 	    intr_count, &intr_actual, 0);
552 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
553 		cmn_err(CE_WARN, "iwk_attach(): "
554 		    "ddi_intr_alloc() failed 0x%x\n", err);
555 		goto attach_fail_intr_b;
556 	}
557 
558 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
559 	if (err != DDI_SUCCESS) {
560 		cmn_err(CE_WARN, "iwk_attach(): "
561 		    "ddi_intr_get_pri() failed 0x%x\n", err);
562 		goto attach_fail_intr_c;
563 	}
564 
565 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
566 	    DDI_INTR_PRI(sc->sc_intr_pri));
567 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
568 	    DDI_INTR_PRI(sc->sc_intr_pri));
569 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
570 	    DDI_INTR_PRI(sc->sc_intr_pri));
571 	mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER,
572 	    DDI_INTR_PRI(sc->sc_intr_pri));
573 
574 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
575 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
576 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
577 	/*
578 	 * initialize the mfthread
579 	 */
580 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
581 	sc->sc_mf_thread = NULL;
582 	sc->sc_mf_thread_switch = 0;
583 
584 	/*
585 	 * Allocate shared page.
586 	 */
587 	err = iwk_alloc_shared(sc);
588 	if (err != DDI_SUCCESS) {
589 		cmn_err(CE_WARN, "iwk_attach(): "
590 		    "failed to allocate shared page\n");
591 		goto attach_fail3;
592 	}
593 
594 	/*
595 	 * Allocate keep warm page.
596 	 */
597 	err = iwk_alloc_kw(sc);
598 	if (err != DDI_SUCCESS) {
599 		cmn_err(CE_WARN, "iwk_attach(): "
600 		    "failed to allocate keep warm page\n");
601 		goto attach_fail3a;
602 	}
603 
604 	/*
605 	 * Do some necessary hardware initializations.
606 	 */
607 	err = iwk_preinit(sc);
608 	if (err != DDI_SUCCESS) {
609 		cmn_err(CE_WARN, "iwk_attach(): "
610 		    "failed to init hardware\n");
611 		goto attach_fail4;
612 	}
613 
614 	/* initialize EEPROM */
615 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
616 	if (err != 0) {
617 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
618 		goto attach_fail4;
619 	}
620 
621 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
622 		cmn_err(CE_WARN, "older EEPROM detected\n");
623 		goto attach_fail4;
624 	}
625 
626 	iwk_get_mac_from_eep(sc);
627 
628 	err = iwk_ring_init(sc);
629 	if (err != DDI_SUCCESS) {
630 		cmn_err(CE_WARN, "iwk_attach(): "
631 		    "failed to allocate and initialize ring\n");
632 		goto attach_fail4;
633 	}
634 
635 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
636 
637 	err = iwk_alloc_fw_dma(sc);
638 	if (err != DDI_SUCCESS) {
639 		cmn_err(CE_WARN, "iwk_attach(): "
640 		    "failed to allocate firmware dma\n");
641 		goto attach_fail5;
642 	}
643 
644 	/*
645 	 * Initialize the wifi part, which will be used by
646 	 * generic layer
647 	 */
648 	ic = &sc->sc_ic;
649 	ic->ic_phytype  = IEEE80211_T_OFDM;
650 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
651 	ic->ic_state    = IEEE80211_S_INIT;
652 	ic->ic_maxrssi  = 100; /* experimental number */
653 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
654 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
655 	/*
656 	 * use software WEP and TKIP, hardware CCMP;
657 	 */
658 	ic->ic_caps |= IEEE80211_C_AES_CCM;
659 	/*
660 	 * Support WPA/WPA2
661 	 */
662 	ic->ic_caps |= IEEE80211_C_WPA;
663 	/*
664 	 * support Adhoc mode
665 	 */
666 	ic->ic_caps |= IEEE80211_C_IBSS;
667 
668 	/* set supported .11b and .11g rates */
669 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
670 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
671 
672 	/* set supported .11b and .11g channels (1 through 11) */
673 	for (i = 1; i <= 11; i++) {
674 		ic->ic_sup_channels[i].ich_freq =
675 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
676 		ic->ic_sup_channels[i].ich_flags =
677 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
678 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
679 		    IEEE80211_CHAN_PASSIVE;
680 	}
681 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
682 
683 	ic->ic_xmit = iwk_send;
684 	/*
685 	 * init Wifi layer
686 	 */
687 	ieee80211_attach(ic);
688 
689 	/*
690 	 * different instance has different WPA door
691 	 */
692 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
693 	    ddi_driver_name(dip),
694 	    ddi_get_instance(dip));
695 
696 	/*
697 	 * Override 80211 default routines
698 	 */
699 	sc->sc_newstate = ic->ic_newstate;
700 	ic->ic_newstate = iwk_newstate;
701 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
702 	ic->ic_recv_mgmt = iwk_recv_mgmt;
703 	ic->ic_node_alloc = iwk_node_alloc;
704 	ic->ic_node_free = iwk_node_free;
705 	ic->ic_crypto.cs_key_set = iwk_key_set;
706 	ieee80211_media_init(ic);
707 	/*
708 	 * initialize default tx key
709 	 */
710 	ic->ic_def_txkey = 0;
711 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
712 	    iwk_rx_softintr, (caddr_t)sc);
713 	if (err != DDI_SUCCESS) {
714 		cmn_err(CE_WARN, "iwk_attach(): "
715 		    "add soft interrupt failed\n");
716 		goto attach_fail7;
717 	}
718 
719 	/*
720 	 * Add the interrupt handler
721 	 */
722 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
723 	    (caddr_t)sc, NULL);
724 	if (err != DDI_SUCCESS) {
725 		cmn_err(CE_WARN, "iwk_attach(): "
726 		    "ddi_intr_add_handle() failed\n");
727 		goto attach_fail8;
728 	}
729 
730 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
731 	if (err != DDI_SUCCESS) {
732 		cmn_err(CE_WARN, "iwk_attach(): "
733 		    "ddi_intr_enable() failed\n");
734 		goto attach_fail_intr_d;
735 	}
736 
737 	/*
738 	 * Initialize pointer to device specific functions
739 	 */
740 	wd.wd_secalloc = WIFI_SEC_NONE;
741 	wd.wd_opmode = ic->ic_opmode;
742 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
743 
744 	macp = mac_alloc(MAC_VERSION);
745 	if (macp == NULL) {
746 		cmn_err(CE_WARN,
747 		    "iwk_attach(): failed to do mac_alloc()\n");
748 		goto attach_fail9;
749 	}
750 
751 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
752 	macp->m_driver		= sc;
753 	macp->m_dip		= dip;
754 	macp->m_src_addr	= ic->ic_macaddr;
755 	macp->m_callbacks	= &iwk_m_callbacks;
756 	macp->m_min_sdu		= 0;
757 	macp->m_max_sdu		= IEEE80211_MTU;
758 	macp->m_pdata		= &wd;
759 	macp->m_pdata_size	= sizeof (wd);
760 
761 	/*
762 	 * Register the macp to mac
763 	 */
764 	err = mac_register(macp, &ic->ic_mach);
765 	mac_free(macp);
766 	if (err != DDI_SUCCESS) {
767 		cmn_err(CE_WARN,
768 		    "iwk_attach(): failed to do mac_register()\n");
769 		goto attach_fail9;
770 	}
771 
772 	/*
773 	 * Create minor node of type DDI_NT_NET_WIFI
774 	 */
775 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
776 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
777 	    instance + 1, DDI_NT_NET_WIFI, 0);
778 	if (err != DDI_SUCCESS)
779 		cmn_err(CE_WARN,
780 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
781 
782 	/*
783 	 * Notify link is down now
784 	 */
785 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
786 
787 	/*
788 	 * create the mf thread to handle the link status,
789 	 * recovery fatal error, etc.
790 	 */
791 	sc->sc_mf_thread_switch = 1;
792 	if (sc->sc_mf_thread == NULL)
793 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
794 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
795 
796 	sc->sc_flags |= IWK_F_ATTACHED;
797 
798 	return (DDI_SUCCESS);
799 attach_fail9:
800 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
801 attach_fail_intr_d:
802 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
803 
804 attach_fail8:
805 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
806 	sc->sc_soft_hdl = NULL;
807 attach_fail7:
808 	ieee80211_detach(ic);
809 attach_fail6:
810 	iwk_free_fw_dma(sc);
811 attach_fail5:
812 	iwk_ring_free(sc);
813 attach_fail4:
814 	iwk_free_kw(sc);
815 attach_fail3a:
816 	iwk_free_shared(sc);
817 attach_fail3:
818 	iwk_destroy_locks(sc);
819 attach_fail_intr_c:
820 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
821 attach_fail_intr_b:
822 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
823 attach_fail_intr_a:
824 	ddi_regs_map_free(&sc->sc_handle);
825 attach_fail2a:
826 	ddi_regs_map_free(&sc->sc_cfg_handle);
827 attach_fail2:
828 	ddi_soft_state_free(iwk_soft_state_p, instance);
829 attach_fail1:
830 	return (err);
831 }
832 
833 int
834 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
835 {
836 	iwk_sc_t	*sc;
837 	int err;
838 
839 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
840 	ASSERT(sc != NULL);
841 
842 	switch (cmd) {
843 	case DDI_DETACH:
844 		break;
845 	case DDI_SUSPEND:
846 		mutex_enter(&sc->sc_glock);
847 		sc->sc_flags |= IWK_F_SUSPEND;
848 		mutex_exit(&sc->sc_glock);
849 		if (sc->sc_flags & IWK_F_RUNNING) {
850 			iwk_stop(sc);
851 		}
852 
853 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
854 		return (DDI_SUCCESS);
855 	default:
856 		return (DDI_FAILURE);
857 	}
858 
859 	if (!(sc->sc_flags & IWK_F_ATTACHED))
860 		return (DDI_FAILURE);
861 
862 	err = mac_disable(sc->sc_ic.ic_mach);
863 	if (err != DDI_SUCCESS)
864 		return (err);
865 
866 	/*
867 	 * Destroy the mf_thread
868 	 */
869 	mutex_enter(&sc->sc_mt_lock);
870 	sc->sc_mf_thread_switch = 0;
871 	while (sc->sc_mf_thread != NULL) {
872 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
873 			break;
874 	}
875 	mutex_exit(&sc->sc_mt_lock);
876 
877 	iwk_stop(sc);
878 	DELAY(500000);
879 
880 	/*
881 	 * Unregiste from the MAC layer subsystem
882 	 */
883 	(void) mac_unregister(sc->sc_ic.ic_mach);
884 
885 	mutex_enter(&sc->sc_glock);
886 	iwk_free_fw_dma(sc);
887 	iwk_ring_free(sc);
888 	iwk_free_kw(sc);
889 	iwk_free_shared(sc);
890 	mutex_exit(&sc->sc_glock);
891 
892 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
893 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
894 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
895 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
896 
897 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
898 	sc->sc_soft_hdl = NULL;
899 
900 	/*
901 	 * detach ieee80211
902 	 */
903 	ieee80211_detach(&sc->sc_ic);
904 
905 	iwk_destroy_locks(sc);
906 
907 	ddi_regs_map_free(&sc->sc_handle);
908 	ddi_regs_map_free(&sc->sc_cfg_handle);
909 	ddi_remove_minor_node(dip, NULL);
910 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
911 
912 	return (DDI_SUCCESS);
913 }
914 
915 /*
916  * quiesce(9E) entry point.
917  *
918  * This function is called when the system is single-threaded at high
919  * PIL with preemption disabled. Therefore, this function must not be
920  * blocked.
921  *
922  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
923  * DDI_FAILURE indicates an error condition and should almost never happen.
924  */
925 int
926 iwk_quiesce(dev_info_t *dip)
927 {
928 	iwk_sc_t	*sc;
929 
930 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
931 	ASSERT(sc != NULL);
932 
933 	/* no message prints and no lock accquisition */
934 #ifdef DEBUG
935 	iwk_dbg_flags = 0;
936 #endif
937 	sc->sc_flags |= IWK_F_QUIESCED;
938 
939 	iwk_stop(sc);
940 
941 	return (DDI_SUCCESS);
942 }
943 
944 static void
945 iwk_destroy_locks(iwk_sc_t *sc)
946 {
947 	cv_destroy(&sc->sc_mt_cv);
948 	mutex_destroy(&sc->sc_mt_lock);
949 	cv_destroy(&sc->sc_tx_cv);
950 	cv_destroy(&sc->sc_cmd_cv);
951 	cv_destroy(&sc->sc_fw_cv);
952 	mutex_destroy(&sc->sc_tx_lock);
953 	mutex_destroy(&sc->sc_glock);
954 }
955 
956 /*
957  * Allocate an area of memory and a DMA handle for accessing it
958  */
959 static int
960 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
961     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
962     uint_t dma_flags, iwk_dma_t *dma_p)
963 {
964 	caddr_t vaddr;
965 	int err;
966 
967 	/*
968 	 * Allocate handle
969 	 */
970 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
971 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
972 	if (err != DDI_SUCCESS) {
973 		dma_p->dma_hdl = NULL;
974 		return (DDI_FAILURE);
975 	}
976 
977 	/*
978 	 * Allocate memory
979 	 */
980 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
981 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
982 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
983 	if (err != DDI_SUCCESS) {
984 		ddi_dma_free_handle(&dma_p->dma_hdl);
985 		dma_p->dma_hdl = NULL;
986 		dma_p->acc_hdl = NULL;
987 		return (DDI_FAILURE);
988 	}
989 
990 	/*
991 	 * Bind the two together
992 	 */
993 	dma_p->mem_va = vaddr;
994 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
995 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
996 	    &dma_p->cookie, &dma_p->ncookies);
997 	if (err != DDI_DMA_MAPPED) {
998 		ddi_dma_mem_free(&dma_p->acc_hdl);
999 		ddi_dma_free_handle(&dma_p->dma_hdl);
1000 		dma_p->acc_hdl = NULL;
1001 		dma_p->dma_hdl = NULL;
1002 		return (DDI_FAILURE);
1003 	}
1004 
1005 	dma_p->nslots = ~0U;
1006 	dma_p->size = ~0U;
1007 	dma_p->token = ~0U;
1008 	dma_p->offset = 0;
1009 	return (DDI_SUCCESS);
1010 }
1011 
1012 /*
1013  * Free one allocated area of DMAable memory
1014  */
1015 static void
1016 iwk_free_dma_mem(iwk_dma_t *dma_p)
1017 {
1018 	if (dma_p->dma_hdl != NULL) {
1019 		if (dma_p->ncookies) {
1020 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1021 			dma_p->ncookies = 0;
1022 		}
1023 		ddi_dma_free_handle(&dma_p->dma_hdl);
1024 		dma_p->dma_hdl = NULL;
1025 	}
1026 
1027 	if (dma_p->acc_hdl != NULL) {
1028 		ddi_dma_mem_free(&dma_p->acc_hdl);
1029 		dma_p->acc_hdl = NULL;
1030 	}
1031 }
1032 
1033 /*
1034  *
1035  */
1036 static int
1037 iwk_alloc_fw_dma(iwk_sc_t *sc)
1038 {
1039 	int err = DDI_SUCCESS;
1040 	iwk_dma_t *dma_p;
1041 	char *t;
1042 
1043 	/*
1044 	 * firmware image layout:
1045 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1046 	 */
1047 	t = (char *)(sc->sc_hdr + 1);
1048 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1049 	    &fw_dma_attr, &iwk_dma_accattr,
1050 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1051 	    &sc->sc_dma_fw_text);
1052 	dma_p = &sc->sc_dma_fw_text;
1053 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1054 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1055 	    dma_p->cookie.dmac_size));
1056 	if (err != DDI_SUCCESS) {
1057 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1058 		    " text dma memory");
1059 		goto fail;
1060 	}
1061 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1062 
1063 	t += LE_32(sc->sc_hdr->textsz);
1064 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1065 	    &fw_dma_attr, &iwk_dma_accattr,
1066 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1067 	    &sc->sc_dma_fw_data);
1068 	dma_p = &sc->sc_dma_fw_data;
1069 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1070 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1071 	    dma_p->cookie.dmac_size));
1072 	if (err != DDI_SUCCESS) {
1073 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1074 		    " data dma memory");
1075 		goto fail;
1076 	}
1077 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1078 
1079 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1080 	    &fw_dma_attr, &iwk_dma_accattr,
1081 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1082 	    &sc->sc_dma_fw_data_bak);
1083 	dma_p = &sc->sc_dma_fw_data_bak;
1084 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1085 	    "size:%lx]\n",
1086 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1087 	    dma_p->cookie.dmac_size));
1088 	if (err != DDI_SUCCESS) {
1089 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1090 		    " data bakeup dma memory");
1091 		goto fail;
1092 	}
1093 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1094 
1095 	t += LE_32(sc->sc_hdr->datasz);
1096 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1097 	    &fw_dma_attr, &iwk_dma_accattr,
1098 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1099 	    &sc->sc_dma_fw_init_text);
1100 	dma_p = &sc->sc_dma_fw_init_text;
1101 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1102 	    "size:%lx]\n",
1103 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1104 	    dma_p->cookie.dmac_size));
1105 	if (err != DDI_SUCCESS) {
1106 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1107 		    "init text dma memory");
1108 		goto fail;
1109 	}
1110 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1111 
1112 	t += LE_32(sc->sc_hdr->init_textsz);
1113 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1114 	    &fw_dma_attr, &iwk_dma_accattr,
1115 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1116 	    &sc->sc_dma_fw_init_data);
1117 	dma_p = &sc->sc_dma_fw_init_data;
1118 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1119 	    "size:%lx]\n",
1120 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1121 	    dma_p->cookie.dmac_size));
1122 	if (err != DDI_SUCCESS) {
1123 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1124 		    "init data dma memory");
1125 		goto fail;
1126 	}
1127 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1128 
1129 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1130 fail:
1131 	return (err);
1132 }
1133 
1134 static void
1135 iwk_free_fw_dma(iwk_sc_t *sc)
1136 {
1137 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1138 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1139 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1140 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1141 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1142 }
1143 
1144 /*
1145  * Allocate a shared page between host and NIC.
1146  */
1147 static int
1148 iwk_alloc_shared(iwk_sc_t *sc)
1149 {
1150 	iwk_dma_t *dma_p;
1151 	int err = DDI_SUCCESS;
1152 
1153 	/* must be aligned on a 4K-page boundary */
1154 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1155 	    &sh_dma_attr, &iwk_dma_accattr,
1156 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1157 	    &sc->sc_dma_sh);
1158 	if (err != DDI_SUCCESS)
1159 		goto fail;
1160 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1161 
1162 	dma_p = &sc->sc_dma_sh;
1163 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1164 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1165 	    dma_p->cookie.dmac_size));
1166 
1167 	return (err);
1168 fail:
1169 	iwk_free_shared(sc);
1170 	return (err);
1171 }
1172 
1173 static void
1174 iwk_free_shared(iwk_sc_t *sc)
1175 {
1176 	iwk_free_dma_mem(&sc->sc_dma_sh);
1177 }
1178 
1179 /*
1180  * Allocate a keep warm page.
1181  */
1182 static int
1183 iwk_alloc_kw(iwk_sc_t *sc)
1184 {
1185 	iwk_dma_t *dma_p;
1186 	int err = DDI_SUCCESS;
1187 
1188 	/* must be aligned on a 4K-page boundary */
1189 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1190 	    &kw_dma_attr, &iwk_dma_accattr,
1191 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1192 	    &sc->sc_dma_kw);
1193 	if (err != DDI_SUCCESS)
1194 		goto fail;
1195 
1196 	dma_p = &sc->sc_dma_kw;
1197 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1198 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1199 	    dma_p->cookie.dmac_size));
1200 
1201 	return (err);
1202 fail:
1203 	iwk_free_kw(sc);
1204 	return (err);
1205 }
1206 
1207 static void
1208 iwk_free_kw(iwk_sc_t *sc)
1209 {
1210 	iwk_free_dma_mem(&sc->sc_dma_kw);
1211 }
1212 
1213 static int
1214 iwk_alloc_rx_ring(iwk_sc_t *sc)
1215 {
1216 	iwk_rx_ring_t *ring;
1217 	iwk_rx_data_t *data;
1218 	iwk_dma_t *dma_p;
1219 	int i, err = DDI_SUCCESS;
1220 
1221 	ring = &sc->sc_rxq;
1222 	ring->cur = 0;
1223 
1224 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1225 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1226 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1227 	    &ring->dma_desc);
1228 	if (err != DDI_SUCCESS) {
1229 		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1230 		goto fail;
1231 	}
1232 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1233 	dma_p = &ring->dma_desc;
1234 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1235 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1236 	    dma_p->cookie.dmac_size));
1237 
1238 	/*
1239 	 * Allocate Rx buffers.
1240 	 */
1241 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1242 		data = &ring->data[i];
1243 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1244 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1245 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1246 		    &data->dma_data);
1247 		if (err != DDI_SUCCESS) {
1248 			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1249 			    "failed\n", i);
1250 			goto fail;
1251 		}
1252 		/*
1253 		 * the physical address bit [8-36] are used,
1254 		 * instead of bit [0-31] in 3945.
1255 		 */
1256 		ring->desc[i] = LE_32((uint32_t)
1257 		    (data->dma_data.cookie.dmac_address >> 8));
1258 	}
1259 	dma_p = &ring->data[0].dma_data;
1260 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1261 	    "size:%lx]\n",
1262 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1263 	    dma_p->cookie.dmac_size));
1264 
1265 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1266 
1267 	return (err);
1268 
1269 fail:
1270 	iwk_free_rx_ring(sc);
1271 	return (err);
1272 }
1273 
1274 static void
1275 iwk_reset_rx_ring(iwk_sc_t *sc)
1276 {
1277 	int n;
1278 
1279 	iwk_mac_access_enter(sc);
1280 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1281 	for (n = 0; n < 2000; n++) {
1282 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1283 			break;
1284 		DELAY(1000);
1285 	}
1286 
1287 	if (n == 2000)
1288 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1289 
1290 	iwk_mac_access_exit(sc);
1291 
1292 	sc->sc_rxq.cur = 0;
1293 }
1294 
1295 static void
1296 iwk_free_rx_ring(iwk_sc_t *sc)
1297 {
1298 	int i;
1299 
1300 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1301 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1302 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1303 			    DDI_DMA_SYNC_FORCPU);
1304 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1305 	}
1306 
1307 	if (sc->sc_rxq.dma_desc.dma_hdl)
1308 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1309 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1310 }
1311 
1312 static int
1313 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1314     int slots, int qid)
1315 {
1316 	iwk_tx_data_t *data;
1317 	iwk_tx_desc_t *desc_h;
1318 	uint32_t paddr_desc_h;
1319 	iwk_cmd_t *cmd_h;
1320 	uint32_t paddr_cmd_h;
1321 	iwk_dma_t *dma_p;
1322 	int i, err = DDI_SUCCESS;
1323 
1324 	ring->qid = qid;
1325 	ring->count = TFD_QUEUE_SIZE_MAX;
1326 	ring->window = slots;
1327 	ring->queued = 0;
1328 	ring->cur = 0;
1329 
1330 	err = iwk_alloc_dma_mem(sc,
1331 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1332 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1333 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1334 	    &ring->dma_desc);
1335 	if (err != DDI_SUCCESS) {
1336 		cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1337 		    "failed\n", qid);
1338 		goto fail;
1339 	}
1340 	dma_p = &ring->dma_desc;
1341 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1342 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1343 	    dma_p->cookie.dmac_size));
1344 
1345 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1346 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1347 
1348 	err = iwk_alloc_dma_mem(sc,
1349 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1350 	    &cmd_dma_attr, &iwk_dma_accattr,
1351 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1352 	    &ring->dma_cmd);
1353 	if (err != DDI_SUCCESS) {
1354 		cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1355 		    "failed\n", qid);
1356 		goto fail;
1357 	}
1358 	dma_p = &ring->dma_cmd;
1359 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1360 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1361 	    dma_p->cookie.dmac_size));
1362 
1363 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1364 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1365 
1366 	/*
1367 	 * Allocate Tx buffers.
1368 	 */
1369 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1370 	    KM_NOSLEEP);
1371 	if (ring->data == NULL) {
1372 		cmn_err(CE_WARN, "could not allocate tx data slots\n");
1373 		goto fail;
1374 	}
1375 
1376 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1377 		data = &ring->data[i];
1378 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1379 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1380 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1381 		    &data->dma_data);
1382 		if (err != DDI_SUCCESS) {
1383 			cmn_err(CE_WARN, "dma alloc tx ring "
1384 			    "buf[%d] failed\n", i);
1385 			goto fail;
1386 		}
1387 
1388 		data->desc = desc_h + i;
1389 		data->paddr_desc = paddr_desc_h +
1390 		    _PTRDIFF(data->desc, desc_h);
1391 		data->cmd = cmd_h +  i; /* (i % slots); */
1392 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1393 		data->paddr_cmd = paddr_cmd_h +
1394 		    _PTRDIFF(data->cmd, cmd_h);
1395 	}
1396 	dma_p = &ring->data[0].dma_data;
1397 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1398 	    "size:%lx]\n",
1399 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1400 	    dma_p->cookie.dmac_size));
1401 
1402 	return (err);
1403 
1404 fail:
1405 	if (ring->data)
1406 		kmem_free(ring->data,
1407 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1408 	iwk_free_tx_ring(sc, ring);
1409 	return (err);
1410 }
1411 
1412 static void
1413 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1414 {
1415 	iwk_tx_data_t *data;
1416 	int i, n;
1417 
1418 	iwk_mac_access_enter(sc);
1419 
1420 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1421 	for (n = 0; n < 200; n++) {
1422 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1423 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1424 			break;
1425 		DELAY(10);
1426 	}
1427 	if (n == 200) {
1428 		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1429 		    ring->qid));
1430 	}
1431 	iwk_mac_access_exit(sc);
1432 
1433 	for (i = 0; i < ring->count; i++) {
1434 		data = &ring->data[i];
1435 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1436 	}
1437 
1438 	ring->queued = 0;
1439 	ring->cur = 0;
1440 }
1441 
1442 /*ARGSUSED*/
1443 static void
1444 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1445 {
1446 	int i;
1447 
1448 	if (ring->dma_desc.dma_hdl != NULL)
1449 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1450 	iwk_free_dma_mem(&ring->dma_desc);
1451 
1452 	if (ring->dma_cmd.dma_hdl != NULL)
1453 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1454 	iwk_free_dma_mem(&ring->dma_cmd);
1455 
1456 	if (ring->data != NULL) {
1457 		for (i = 0; i < ring->count; i++) {
1458 			if (ring->data[i].dma_data.dma_hdl)
1459 				IWK_DMA_SYNC(ring->data[i].dma_data,
1460 				    DDI_DMA_SYNC_FORDEV);
1461 			iwk_free_dma_mem(&ring->data[i].dma_data);
1462 		}
1463 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1464 	}
1465 }
1466 
1467 static int
1468 iwk_ring_init(iwk_sc_t *sc)
1469 {
1470 	int i, err = DDI_SUCCESS;
1471 
1472 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1473 		if (i == IWK_CMD_QUEUE_NUM)
1474 			continue;
1475 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1476 		    i);
1477 		if (err != DDI_SUCCESS)
1478 			goto fail;
1479 	}
1480 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1481 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1482 	if (err != DDI_SUCCESS)
1483 		goto fail;
1484 	err = iwk_alloc_rx_ring(sc);
1485 	if (err != DDI_SUCCESS)
1486 		goto fail;
1487 	return (err);
1488 
1489 fail:
1490 	return (err);
1491 }
1492 
1493 static void
1494 iwk_ring_free(iwk_sc_t *sc)
1495 {
1496 	int i = IWK_NUM_QUEUES;
1497 
1498 	iwk_free_rx_ring(sc);
1499 	while (--i >= 0) {
1500 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1501 	}
1502 }
1503 
1504 /* ARGSUSED */
1505 static ieee80211_node_t *
1506 iwk_node_alloc(ieee80211com_t *ic)
1507 {
1508 	iwk_amrr_t *amrr;
1509 
1510 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1511 	if (amrr != NULL)
1512 		iwk_amrr_init(amrr);
1513 	return (&amrr->in);
1514 }
1515 
1516 static void
1517 iwk_node_free(ieee80211_node_t *in)
1518 {
1519 	ieee80211com_t *ic = in->in_ic;
1520 
1521 	ic->ic_node_cleanup(in);
1522 	if (in->in_wpa_ie != NULL)
1523 		ieee80211_free(in->in_wpa_ie);
1524 	kmem_free(in, sizeof (iwk_amrr_t));
1525 }
1526 
1527 /*ARGSUSED*/
1528 static int
1529 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1530 {
1531 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1532 	ieee80211_node_t *in = ic->ic_bss;
1533 	enum ieee80211_state ostate = ic->ic_state;
1534 	int i, err = IWK_SUCCESS;
1535 
1536 	mutex_enter(&sc->sc_glock);
1537 	switch (nstate) {
1538 	case IEEE80211_S_SCAN:
1539 		switch (ostate) {
1540 		case IEEE80211_S_INIT:
1541 		{
1542 			iwk_add_sta_t node;
1543 
1544 			sc->sc_flags |= IWK_F_SCANNING;
1545 			iwk_set_led(sc, 2, 10, 2);
1546 
1547 			/*
1548 			 * clear association to receive beacons from
1549 			 * all BSS'es
1550 			 */
1551 			sc->sc_config.assoc_id = 0;
1552 			sc->sc_config.filter_flags &=
1553 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1554 
1555 			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1556 			    "flags %x filter_flags %x\n", sc->sc_config.chan,
1557 			    sc->sc_config.flags, sc->sc_config.filter_flags));
1558 
1559 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1560 			    sizeof (iwk_rxon_cmd_t), 1);
1561 			if (err != IWK_SUCCESS) {
1562 				cmn_err(CE_WARN,
1563 				    "could not clear association\n");
1564 				sc->sc_flags &= ~IWK_F_SCANNING;
1565 				mutex_exit(&sc->sc_glock);
1566 				return (err);
1567 			}
1568 
1569 			/* add broadcast node to send probe request */
1570 			(void) memset(&node, 0, sizeof (node));
1571 			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1572 			node.id = IWK_BROADCAST_ID;
1573 			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1574 			    sizeof (node), 1);
1575 			if (err != IWK_SUCCESS) {
1576 				cmn_err(CE_WARN, "could not add "
1577 				    "broadcast node\n");
1578 				sc->sc_flags &= ~IWK_F_SCANNING;
1579 				mutex_exit(&sc->sc_glock);
1580 				return (err);
1581 			}
1582 			break;
1583 		}
1584 		case IEEE80211_S_SCAN:
1585 			mutex_exit(&sc->sc_glock);
1586 			/* step to next channel before actual FW scan */
1587 			err = sc->sc_newstate(ic, nstate, arg);
1588 			mutex_enter(&sc->sc_glock);
1589 			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1590 				cmn_err(CE_WARN,
1591 				    "could not initiate scan\n");
1592 				sc->sc_flags &= ~IWK_F_SCANNING;
1593 				ieee80211_cancel_scan(ic);
1594 			}
1595 			mutex_exit(&sc->sc_glock);
1596 			return (err);
1597 		default:
1598 			break;
1599 
1600 		}
1601 		sc->sc_clk = 0;
1602 		break;
1603 
1604 	case IEEE80211_S_AUTH:
1605 		if (ostate == IEEE80211_S_SCAN) {
1606 			sc->sc_flags &= ~IWK_F_SCANNING;
1607 		}
1608 
1609 		/* reset state to handle reassociations correctly */
1610 		sc->sc_config.assoc_id = 0;
1611 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1612 
1613 		/*
1614 		 * before sending authentication and association request frame,
1615 		 * we need do something in the hardware, such as setting the
1616 		 * channel same to the target AP...
1617 		 */
1618 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1619 			cmn_err(CE_WARN, "could not setup firmware for "
1620 			    "authentication\n");
1621 			mutex_exit(&sc->sc_glock);
1622 			return (err);
1623 		}
1624 		break;
1625 
1626 	case IEEE80211_S_RUN:
1627 		if (ostate == IEEE80211_S_SCAN) {
1628 			sc->sc_flags &= ~IWK_F_SCANNING;
1629 		}
1630 
1631 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1632 			/* let LED blink when monitoring */
1633 			iwk_set_led(sc, 2, 10, 10);
1634 			break;
1635 		}
1636 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1637 
1638 		/* IBSS mode */
1639 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1640 			/*
1641 			 * clean all nodes in ibss node table
1642 			 * in order to be consistent with hardware
1643 			 */
1644 			err = iwk_run_state_config_ibss(ic);
1645 			if (err != IWK_SUCCESS) {
1646 				cmn_err(CE_WARN, "iwk_newstate(): "
1647 				    "failed to update configuration "
1648 				    "in IBSS mode\n");
1649 				mutex_exit(&sc->sc_glock);
1650 				return (err);
1651 			}
1652 		}
1653 
1654 		/* none IBSS mode */
1655 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1656 			/* update adapter's configuration */
1657 			err = iwk_run_state_config_sta(ic);
1658 			if (err != IWK_SUCCESS) {
1659 				cmn_err(CE_WARN, "iwk_newstate(): "
1660 				    "failed to update configuration "
1661 				    "in none IBSS mode\n");
1662 				mutex_exit(&sc->sc_glock);
1663 				return (err);
1664 			}
1665 		}
1666 
1667 		/* obtain current temperature of chipset */
1668 		sc->sc_tempera = iwk_curr_tempera(sc);
1669 
1670 		/*
1671 		 * make Tx power calibration to determine
1672 		 * the gains of DSP and radio
1673 		 */
1674 		err = iwk_tx_power_calibration(sc);
1675 		if (err) {
1676 			cmn_err(CE_WARN, "iwk_newstate(): "
1677 			    "failed to set tx power table\n");
1678 			mutex_exit(&sc->sc_glock);
1679 			return (err);
1680 		}
1681 
1682 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1683 
1684 			/*
1685 			 * allocate and transmit beacon frames
1686 			 */
1687 			err = iwk_start_tx_beacon(ic);
1688 			if (err != IWK_SUCCESS) {
1689 				cmn_err(CE_WARN, "iwk_newstate(): "
1690 				    "can't transmit beacon frames\n");
1691 				mutex_exit(&sc->sc_glock);
1692 				return (err);
1693 			}
1694 		}
1695 
1696 		/* start automatic rate control */
1697 		mutex_enter(&sc->sc_mt_lock);
1698 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1699 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1700 			/* set rate to some reasonable initial value */
1701 			i = in->in_rates.ir_nrates - 1;
1702 			while (i > 0 && IEEE80211_RATE(i) > 72)
1703 				i--;
1704 			in->in_txrate = i;
1705 		} else {
1706 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1707 		}
1708 		mutex_exit(&sc->sc_mt_lock);
1709 
1710 		/* set LED on after associated */
1711 		iwk_set_led(sc, 2, 0, 1);
1712 		break;
1713 
1714 	case IEEE80211_S_INIT:
1715 		if (ostate == IEEE80211_S_SCAN) {
1716 			sc->sc_flags &= ~IWK_F_SCANNING;
1717 		}
1718 
1719 		/* set LED off after init */
1720 		iwk_set_led(sc, 2, 1, 0);
1721 		break;
1722 	case IEEE80211_S_ASSOC:
1723 		if (ostate == IEEE80211_S_SCAN) {
1724 			sc->sc_flags &= ~IWK_F_SCANNING;
1725 		}
1726 
1727 		break;
1728 	}
1729 
1730 	mutex_exit(&sc->sc_glock);
1731 
1732 	err = sc->sc_newstate(ic, nstate, arg);
1733 
1734 	if (nstate == IEEE80211_S_RUN) {
1735 
1736 		mutex_enter(&sc->sc_glock);
1737 
1738 		/*
1739 		 * make initialization for Receiver
1740 		 * sensitivity calibration
1741 		 */
1742 		err = iwk_rx_sens_init(sc);
1743 		if (err) {
1744 			cmn_err(CE_WARN, "iwk_newstate(): "
1745 			    "failed to init RX sensitivity\n");
1746 			mutex_exit(&sc->sc_glock);
1747 			return (err);
1748 		}
1749 
1750 		/* make initialization for Receiver gain balance */
1751 		err = iwk_rxgain_diff_init(sc);
1752 		if (err) {
1753 			cmn_err(CE_WARN, "iwk_newstate(): "
1754 			    "failed to init phy calibration\n");
1755 			mutex_exit(&sc->sc_glock);
1756 			return (err);
1757 		}
1758 
1759 		mutex_exit(&sc->sc_glock);
1760 
1761 	}
1762 
1763 	return (err);
1764 }
1765 
1766 /*ARGSUSED*/
1767 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1768     const uint8_t mac[IEEE80211_ADDR_LEN])
1769 {
1770 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1771 	iwk_add_sta_t node;
1772 	int err;
1773 	uint8_t index1;
1774 
1775 	switch (k->wk_cipher->ic_cipher) {
1776 	case IEEE80211_CIPHER_WEP:
1777 	case IEEE80211_CIPHER_TKIP:
1778 		return (1); /* sofeware do it. */
1779 	case IEEE80211_CIPHER_AES_CCM:
1780 		break;
1781 	default:
1782 		return (0);
1783 	}
1784 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1785 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1786 
1787 	mutex_enter(&sc->sc_glock);
1788 
1789 	/* update ap/multicast node */
1790 	(void) memset(&node, 0, sizeof (node));
1791 	if (IEEE80211_IS_MULTICAST(mac)) {
1792 		(void) memset(node.bssid, 0xff, 6);
1793 		node.id = IWK_BROADCAST_ID;
1794 	} else if (ic->ic_opmode == IEEE80211_M_IBSS) {
1795 		mutex_exit(&sc->sc_glock);
1796 		mutex_enter(&sc->sc_ibss.node_tb_lock);
1797 
1798 		/*
1799 		 * search for node in ibss node table
1800 		 */
1801 		for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
1802 		    index1++) {
1803 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
1804 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
1805 			    ibss_node_tb[index1].node.bssid,
1806 			    mac)) {
1807 				break;
1808 			}
1809 		}
1810 		if (index1 >= IWK_BROADCAST_ID) {
1811 			cmn_err(CE_WARN, "iwk_key_set(): "
1812 			    "have no this node in hardware node table\n");
1813 			mutex_exit(&sc->sc_ibss.node_tb_lock);
1814 			return (0);
1815 		} else {
1816 			/*
1817 			 * configure key for given node in hardware
1818 			 */
1819 			if (k->wk_flags & IEEE80211_KEY_XMIT) {
1820 				sc->sc_ibss.ibss_node_tb[index1].
1821 				    node.key_flags = 0;
1822 				sc->sc_ibss.ibss_node_tb[index1].
1823 				    node.keyp = k->wk_keyix;
1824 			} else {
1825 				sc->sc_ibss.ibss_node_tb[index1].
1826 				    node.key_flags = (1 << 14);
1827 				sc->sc_ibss.ibss_node_tb[index1].
1828 				    node.keyp = k->wk_keyix + 4;
1829 			}
1830 
1831 			(void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
1832 			    k->wk_key, k->wk_keylen);
1833 			sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
1834 			    (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1835 			sc->sc_ibss.ibss_node_tb[index1].node.sta_mask =
1836 			    STA_MODIFY_KEY_MASK;
1837 			sc->sc_ibss.ibss_node_tb[index1].node.control = 1;
1838 
1839 			mutex_enter(&sc->sc_glock);
1840 			err = iwk_cmd(sc, REPLY_ADD_STA,
1841 			    &sc->sc_ibss.ibss_node_tb[index1].node,
1842 			    sizeof (iwk_add_sta_t), 1);
1843 			if (err != IWK_SUCCESS) {
1844 				cmn_err(CE_WARN, "iwk_key_set(): "
1845 				    "failed to update IBSS node in hardware\n");
1846 				mutex_exit(&sc->sc_glock);
1847 				mutex_exit(&sc->sc_ibss.node_tb_lock);
1848 				return (0);
1849 			}
1850 			mutex_exit(&sc->sc_glock);
1851 		}
1852 		mutex_exit(&sc->sc_ibss.node_tb_lock);
1853 		return (1);
1854 	} else {
1855 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1856 		node.id = IWK_AP_ID;
1857 	}
1858 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1859 		node.key_flags = 0;
1860 		node.keyp = k->wk_keyix;
1861 	} else {
1862 		node.key_flags = (1 << 14);
1863 		node.keyp = k->wk_keyix + 4;
1864 	}
1865 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1866 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1867 	node.sta_mask = STA_MODIFY_KEY_MASK;
1868 	node.control = 1;
1869 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1870 	if (err != IWK_SUCCESS) {
1871 		cmn_err(CE_WARN, "iwk_key_set():"
1872 		    "failed to update ap node\n");
1873 		mutex_exit(&sc->sc_glock);
1874 		return (0);
1875 	}
1876 	mutex_exit(&sc->sc_glock);
1877 	return (1);
1878 }
1879 
1880 /*
1881  * exclusive access to mac begin.
1882  */
1883 static void
1884 iwk_mac_access_enter(iwk_sc_t *sc)
1885 {
1886 	uint32_t tmp;
1887 	int n;
1888 
1889 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1890 	IWK_WRITE(sc, CSR_GP_CNTRL,
1891 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1892 
1893 	/* wait until we succeed */
1894 	for (n = 0; n < 1000; n++) {
1895 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1896 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1897 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1898 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1899 			break;
1900 		DELAY(10);
1901 	}
1902 	if (n == 1000)
1903 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1904 }
1905 
1906 /*
1907  * exclusive access to mac end.
1908  */
1909 static void
1910 iwk_mac_access_exit(iwk_sc_t *sc)
1911 {
1912 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1913 	IWK_WRITE(sc, CSR_GP_CNTRL,
1914 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1915 }
1916 
1917 static uint32_t
1918 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1919 {
1920 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1921 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1922 }
1923 
1924 static void
1925 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1926 {
1927 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1928 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1929 }
1930 
1931 static uint32_t
1932 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1933 {
1934 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1935 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1936 }
1937 
1938 static void
1939 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1940 {
1941 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1942 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1943 }
1944 
1945 static void
1946 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1947     uint32_t *data, int wlen)
1948 {
1949 	for (; wlen > 0; wlen--, data++, addr += 4)
1950 		iwk_reg_write(sc, addr, *data);
1951 }
1952 
1953 
1954 /*
1955  * ucode load/initialization steps:
1956  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1957  * BSM contains a small memory that *always* stays powered up, so it can
1958  * retain the bootstrap program even when the card is in a power-saving
1959  * power-down state.  The BSM loads the small program into ARC processor's
1960  * instruction memory when triggered by power-up.
1961  * 2)  load Initialize image via bootstrap program.
1962  * The Initialize image sets up regulatory and calibration data for the
1963  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1964  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1965  * correction.
1966  */
1967 static int
1968 iwk_load_firmware(iwk_sc_t *sc)
1969 {
1970 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1971 	uint32_t size = sc->sc_hdr->bootsz;
1972 	int n, err = IWK_SUCCESS;
1973 
1974 	/*
1975 	 * The physical address bit [4-35] of the initialize uCode.
1976 	 * In the initialize alive notify interrupt the physical address of
1977 	 * the runtime ucode will be set for loading.
1978 	 */
1979 	iwk_mac_access_enter(sc);
1980 
1981 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1982 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1983 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1984 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1985 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1986 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1987 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1988 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1989 
1990 	/* load bootstrap code into BSM memory */
1991 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1992 	    size / sizeof (uint32_t));
1993 
1994 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1995 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1996 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1997 
1998 	/*
1999 	 * prepare to load initialize uCode
2000 	 */
2001 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2002 
2003 	/* wait while the adapter is busy loading the firmware */
2004 	for (n = 0; n < 1000; n++) {
2005 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
2006 		    BSM_WR_CTRL_REG_BIT_START))
2007 			break;
2008 		DELAY(10);
2009 	}
2010 	if (n == 1000) {
2011 		cmn_err(CE_WARN, "timeout transferring firmware\n");
2012 		err = ETIMEDOUT;
2013 		return (err);
2014 	}
2015 
2016 	/* for future power-save mode use */
2017 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2018 
2019 	iwk_mac_access_exit(sc);
2020 
2021 	return (err);
2022 }
2023 
2024 /*ARGSUSED*/
2025 static void
2026 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2027 {
2028 	ieee80211com_t *ic = &sc->sc_ic;
2029 	iwk_rx_ring_t *ring = &sc->sc_rxq;
2030 	iwk_rx_phy_res_t *stat;
2031 	ieee80211_node_t *in;
2032 	uint32_t *tail;
2033 	struct ieee80211_frame *wh;
2034 	mblk_t *mp;
2035 	uint16_t len, rssi, mrssi, agc;
2036 	int16_t t;
2037 	uint32_t ants, i;
2038 	struct iwk_rx_non_cfg_phy *phyinfo;
2039 
2040 	/* assuming not 11n here. cope with 11n in phase-II */
2041 	stat = (iwk_rx_phy_res_t *)(desc + 1);
2042 	if (stat->cfg_phy_cnt > 20) {
2043 		return;
2044 	}
2045 
2046 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
2047 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
2048 	mrssi = 0;
2049 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
2050 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
2051 	for (i = 0; i < 3; i++) {
2052 		if (ants & (1 << i))
2053 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
2054 	}
2055 	t = mrssi - agc - 44; /* t is the dBM value */
2056 	/*
2057 	 * convert dBm to percentage ???
2058 	 */
2059 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
2060 	    (75 * 75);
2061 	if (rssi > 100)
2062 		rssi = 100;
2063 	if (rssi < 1)
2064 		rssi = 1;
2065 	len = stat->byte_count;
2066 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
2067 
2068 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
2069 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2070 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2071 	    len, stat->rate.r.s.rate, stat->channel,
2072 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2073 	    stat->cfg_phy_cnt, LE_32(*tail)));
2074 
2075 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2076 		IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n"));
2077 		return;
2078 	}
2079 
2080 	/*
2081 	 * discard Rx frames with bad CRC
2082 	 */
2083 	if ((LE_32(*tail) &
2084 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2085 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2086 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2087 		    LE_32(*tail)));
2088 		sc->sc_rx_err++;
2089 		return;
2090 	}
2091 
2092 	wh = (struct ieee80211_frame *)
2093 	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
2094 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2095 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2096 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2097 		    sc->sc_assoc_id));
2098 	}
2099 #ifdef DEBUG
2100 	if (iwk_dbg_flags & IWK_DEBUG_RX)
2101 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2102 #endif
2103 	in = ieee80211_find_rxnode(ic, wh);
2104 	mp = allocb(len, BPRI_MED);
2105 	if (mp) {
2106 		(void) memcpy(mp->b_wptr, wh, len);
2107 		mp->b_wptr += len;
2108 
2109 		/* send the frame to the 802.11 layer */
2110 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2111 	} else {
2112 		sc->sc_rx_nobuf++;
2113 		IWK_DBG((IWK_DEBUG_RX,
2114 		    "iwk_rx_intr(): alloc rx buf failed\n"));
2115 	}
2116 	/* release node reference */
2117 	ieee80211_free_node(in);
2118 }
2119 
2120 /*ARGSUSED*/
2121 static void
2122 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2123 {
2124 	ieee80211com_t *ic = &sc->sc_ic;
2125 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2126 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2127 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2128 
2129 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2130 	    " retries=%d frame_count=%x nkill=%d "
2131 	    "rate=%x duration=%d status=%x\n",
2132 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2133 	    stat->bt_kill_count, stat->rate.r.s.rate,
2134 	    LE_32(stat->duration), LE_32(stat->status)));
2135 
2136 	amrr->txcnt++;
2137 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2138 	if (stat->ntries > 0) {
2139 		amrr->retrycnt++;
2140 		sc->sc_tx_retries++;
2141 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2142 		    sc->sc_tx_retries));
2143 	}
2144 
2145 	sc->sc_tx_timer = 0;
2146 
2147 	mutex_enter(&sc->sc_tx_lock);
2148 	ring->queued--;
2149 	if (ring->queued < 0)
2150 		ring->queued = 0;
2151 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2152 		sc->sc_need_reschedule = 0;
2153 		mutex_exit(&sc->sc_tx_lock);
2154 		mac_tx_update(ic->ic_mach);
2155 		mutex_enter(&sc->sc_tx_lock);
2156 	}
2157 	mutex_exit(&sc->sc_tx_lock);
2158 }
2159 
2160 static void
2161 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2162 {
2163 	if ((desc->hdr.qid & 7) != 4) {
2164 		return;
2165 	}
2166 	mutex_enter(&sc->sc_glock);
2167 	sc->sc_flags |= IWK_F_CMD_DONE;
2168 	cv_signal(&sc->sc_cmd_cv);
2169 	mutex_exit(&sc->sc_glock);
2170 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2171 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2172 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2173 	    desc->hdr.type));
2174 }
2175 
2176 static void
2177 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2178 {
2179 	uint32_t base, i;
2180 	struct iwk_alive_resp *ar =
2181 	    (struct iwk_alive_resp *)(desc + 1);
2182 
2183 	/* the microcontroller is ready */
2184 	IWK_DBG((IWK_DEBUG_FW,
2185 	    "microcode alive notification minor: %x major: %x type:"
2186 	    " %x subtype: %x\n",
2187 	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2188 
2189 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2190 		IWK_DBG((IWK_DEBUG_FW,
2191 		    "microcontroller initialization failed\n"));
2192 	}
2193 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2194 		IWK_DBG((IWK_DEBUG_FW,
2195 		    "initialization alive received.\n"));
2196 		(void) memcpy(&sc->sc_card_alive_init, ar,
2197 		    sizeof (struct iwk_init_alive_resp));
2198 		/* XXX get temperature */
2199 		iwk_mac_access_enter(sc);
2200 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2201 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2202 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2203 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2204 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2205 		    sc->sc_dma_fw_data.cookie.dmac_size);
2206 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2207 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2208 		iwk_mac_access_exit(sc);
2209 	} else {
2210 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2211 		(void) memcpy(&sc->sc_card_alive_run, ar,
2212 		    sizeof (struct iwk_alive_resp));
2213 
2214 		/*
2215 		 * Init SCD related registers to make Tx work. XXX
2216 		 */
2217 		iwk_mac_access_enter(sc);
2218 
2219 		/* read sram address of data base */
2220 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2221 
2222 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2223 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2224 		    i < 128; i += 4)
2225 			iwk_mem_write(sc, base + i, 0);
2226 
2227 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2228 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2229 		    i < 256; i += 4)
2230 			iwk_mem_write(sc, base + i, 0);
2231 
2232 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2233 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2234 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2235 			iwk_mem_write(sc, base + i, 0);
2236 
2237 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2238 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2239 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2240 
2241 		/* initiate the tx queues */
2242 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
2243 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2244 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2245 			iwk_mem_write(sc, sc->sc_scd_base +
2246 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2247 			    (SCD_WIN_SIZE & 0x7f));
2248 			iwk_mem_write(sc, sc->sc_scd_base +
2249 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2250 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2251 		}
2252 		/* interrupt enable on each queue0-7 */
2253 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2254 		    (1 << IWK_NUM_QUEUES) - 1);
2255 		/* enable  each channel 0-7 */
2256 		iwk_reg_write(sc, SCD_TXFACT,
2257 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2258 		/*
2259 		 * queue 0-7 maps to FIFO 0-7 and
2260 		 * all queues work under FIFO mode (none-scheduler-ack)
2261 		 */
2262 		for (i = 0; i < 7; i++) {
2263 			iwk_reg_write(sc,
2264 			    SCD_QUEUE_STATUS_BITS(i),
2265 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2266 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2267 			    SCD_QUEUE_STTS_REG_MSK);
2268 		}
2269 		iwk_mac_access_exit(sc);
2270 
2271 		sc->sc_flags |= IWK_F_FW_INIT;
2272 		cv_signal(&sc->sc_fw_cv);
2273 	}
2274 
2275 }
2276 
2277 static uint_t
2278 /* LINTED: argument unused in function: unused */
2279 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2280 {
2281 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2282 	ieee80211com_t *ic = &sc->sc_ic;
2283 	iwk_rx_desc_t *desc;
2284 	iwk_rx_data_t *data;
2285 	uint32_t index;
2286 
2287 	mutex_enter(&sc->sc_glock);
2288 	if (sc->sc_rx_softint_pending != 1) {
2289 		mutex_exit(&sc->sc_glock);
2290 		return (DDI_INTR_UNCLAIMED);
2291 	}
2292 	/* disable interrupts */
2293 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2294 	mutex_exit(&sc->sc_glock);
2295 
2296 	/*
2297 	 * firmware has moved the index of the rx queue, driver get it,
2298 	 * and deal with it.
2299 	 */
2300 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2301 
2302 	while (sc->sc_rxq.cur != index) {
2303 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2304 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2305 
2306 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2307 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2308 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2309 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2310 
2311 		/* a command other than a tx need to be replied */
2312 		if (!(desc->hdr.qid & 0x80) &&
2313 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2314 		    (desc->hdr.type != REPLY_TX) &&
2315 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2316 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2317 		    (desc->hdr.type != SENSITIVITY_CMD))
2318 			iwk_cmd_intr(sc, desc);
2319 
2320 		switch (desc->hdr.type) {
2321 		case REPLY_4965_RX:
2322 			iwk_rx_intr(sc, desc, data);
2323 			break;
2324 
2325 		case REPLY_TX:
2326 			iwk_tx_intr(sc, desc, data);
2327 			break;
2328 
2329 		case REPLY_ALIVE:
2330 			iwk_ucode_alive(sc, desc);
2331 			break;
2332 
2333 		case CARD_STATE_NOTIFICATION:
2334 		{
2335 			uint32_t *status = (uint32_t *)(desc + 1);
2336 
2337 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2338 			    LE_32(*status)));
2339 
2340 			if (LE_32(*status) & 1) {
2341 				/*
2342 				 * the radio button has to be pushed(OFF). It
2343 				 * is considered as a hw error, the
2344 				 * iwk_thread() tries to recover it after the
2345 				 * button is pushed again(ON)
2346 				 */
2347 				cmn_err(CE_NOTE,
2348 				    "iwk_rx_softintr(): "
2349 				    "Radio transmitter is off\n");
2350 				sc->sc_ostate = sc->sc_ic.ic_state;
2351 				ieee80211_new_state(&sc->sc_ic,
2352 				    IEEE80211_S_INIT, -1);
2353 				sc->sc_flags |=
2354 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2355 			}
2356 			break;
2357 		}
2358 		case SCAN_START_NOTIFICATION:
2359 		{
2360 			iwk_start_scan_t *scan =
2361 			    (iwk_start_scan_t *)(desc + 1);
2362 
2363 			IWK_DBG((IWK_DEBUG_SCAN,
2364 			    "scanning channel %d status %x\n",
2365 			    scan->chan, LE_32(scan->status)));
2366 
2367 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2368 			break;
2369 		}
2370 		case SCAN_COMPLETE_NOTIFICATION:
2371 		{
2372 			iwk_stop_scan_t *scan =
2373 			    (iwk_stop_scan_t *)(desc + 1);
2374 
2375 			IWK_DBG((IWK_DEBUG_SCAN,
2376 			    "completed channel %d (burst of %d) status %02x\n",
2377 			    scan->chan, scan->nchan, scan->status));
2378 
2379 			sc->sc_scan_pending++;
2380 			break;
2381 		}
2382 		case STATISTICS_NOTIFICATION:
2383 			/* handle statistics notification */
2384 			iwk_statistics_notify(sc, desc);
2385 			break;
2386 		}
2387 
2388 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2389 	}
2390 
2391 	/*
2392 	 * driver dealt with what reveived in rx queue and tell the information
2393 	 * to the firmware.
2394 	 */
2395 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2396 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2397 
2398 	mutex_enter(&sc->sc_glock);
2399 	/* re-enable interrupts */
2400 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2401 	sc->sc_rx_softint_pending = 0;
2402 	mutex_exit(&sc->sc_glock);
2403 
2404 	return (DDI_INTR_CLAIMED);
2405 }
2406 
2407 static uint_t
2408 /* LINTED: argument unused in function: unused */
2409 iwk_intr(caddr_t arg, caddr_t unused)
2410 {
2411 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2412 	uint32_t r, rfh;
2413 
2414 	mutex_enter(&sc->sc_glock);
2415 
2416 	if (sc->sc_flags & IWK_F_SUSPEND) {
2417 		mutex_exit(&sc->sc_glock);
2418 		return (DDI_INTR_UNCLAIMED);
2419 	}
2420 
2421 	r = IWK_READ(sc, CSR_INT);
2422 	if (r == 0 || r == 0xffffffff) {
2423 		mutex_exit(&sc->sc_glock);
2424 		return (DDI_INTR_UNCLAIMED);
2425 	}
2426 
2427 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2428 
2429 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2430 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2431 	/* disable interrupts */
2432 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2433 	/* ack interrupts */
2434 	IWK_WRITE(sc, CSR_INT, r);
2435 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2436 
2437 	if (sc->sc_soft_hdl == NULL) {
2438 		mutex_exit(&sc->sc_glock);
2439 		return (DDI_INTR_CLAIMED);
2440 	}
2441 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2442 		cmn_err(CE_WARN, "fatal firmware error\n");
2443 		mutex_exit(&sc->sc_glock);
2444 #ifdef DEBUG
2445 		/* dump event and error logs to dmesg */
2446 		iwk_write_error_log(sc);
2447 		iwk_write_event_log(sc);
2448 #endif /* DEBUG */
2449 		iwk_stop(sc);
2450 		sc->sc_ostate = sc->sc_ic.ic_state;
2451 		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2452 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2453 		return (DDI_INTR_CLAIMED);
2454 	}
2455 
2456 	if (r & BIT_INT_RF_KILL) {
2457 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2458 	}
2459 
2460 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2461 	    (rfh & FH_INT_RX_MASK)) {
2462 		sc->sc_rx_softint_pending = 1;
2463 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2464 	}
2465 
2466 	if (r & BIT_INT_ALIVE)	{
2467 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2468 	}
2469 
2470 	/* re-enable interrupts */
2471 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2472 	mutex_exit(&sc->sc_glock);
2473 
2474 	return (DDI_INTR_CLAIMED);
2475 }
2476 
2477 static uint8_t
2478 iwk_rate_to_plcp(int rate)
2479 {
2480 	uint8_t ret;
2481 
2482 	switch (rate) {
2483 	/* CCK rates */
2484 	case 2:
2485 		ret = 0xa;
2486 		break;
2487 	case 4:
2488 		ret = 0x14;
2489 		break;
2490 	case 11:
2491 		ret = 0x37;
2492 		break;
2493 	case 22:
2494 		ret = 0x6e;
2495 		break;
2496 	/* OFDM rates */
2497 	case 12:
2498 		ret = 0xd;
2499 		break;
2500 	case 18:
2501 		ret = 0xf;
2502 		break;
2503 	case 24:
2504 		ret = 0x5;
2505 		break;
2506 	case 36:
2507 		ret = 0x7;
2508 		break;
2509 	case 48:
2510 		ret = 0x9;
2511 		break;
2512 	case 72:
2513 		ret = 0xb;
2514 		break;
2515 	case 96:
2516 		ret = 0x1;
2517 		break;
2518 	case 108:
2519 		ret = 0x3;
2520 		break;
2521 	default:
2522 		ret = 0;
2523 		break;
2524 	}
2525 	return (ret);
2526 }
2527 
2528 static mblk_t *
2529 iwk_m_tx(void *arg, mblk_t *mp)
2530 {
2531 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2532 	ieee80211com_t	*ic = &sc->sc_ic;
2533 	mblk_t			*next;
2534 
2535 	if (sc->sc_flags & IWK_F_SUSPEND) {
2536 		freemsgchain(mp);
2537 		return (NULL);
2538 	}
2539 
2540 	if (ic->ic_state != IEEE80211_S_RUN) {
2541 		freemsgchain(mp);
2542 		return (NULL);
2543 	}
2544 
2545 	while (mp != NULL) {
2546 		next = mp->b_next;
2547 		mp->b_next = NULL;
2548 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2549 			mp->b_next = next;
2550 			break;
2551 		}
2552 		mp = next;
2553 	}
2554 	return (mp);
2555 }
2556 
2557 /* ARGSUSED */
2558 static int
2559 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2560 {
2561 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2562 	iwk_tx_ring_t *ring;
2563 	iwk_tx_desc_t *desc;
2564 	iwk_tx_data_t *data;
2565 	iwk_cmd_t *cmd;
2566 	iwk_tx_cmd_t *tx;
2567 	ieee80211_node_t *in;
2568 	struct ieee80211_frame *wh;
2569 	struct ieee80211_key *k = NULL;
2570 	mblk_t *m, *m0;
2571 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2572 	uint16_t masks = 0;
2573 	uint8_t index, index1, index2;
2574 
2575 	ring = &sc->sc_txq[0];
2576 	data = &ring->data[ring->cur];
2577 	desc = data->desc;
2578 	cmd = data->cmd;
2579 	bzero(desc, sizeof (*desc));
2580 	bzero(cmd, sizeof (*cmd));
2581 
2582 	mutex_enter(&sc->sc_tx_lock);
2583 	if (sc->sc_flags & IWK_F_SUSPEND) {
2584 		mutex_exit(&sc->sc_tx_lock);
2585 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2586 		    IEEE80211_FC0_TYPE_DATA) {
2587 			freemsg(mp);
2588 		}
2589 		err = IWK_FAIL;
2590 		goto exit;
2591 	}
2592 
2593 	if (ring->queued > ring->count - 64) {
2594 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2595 		sc->sc_need_reschedule = 1;
2596 		mutex_exit(&sc->sc_tx_lock);
2597 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2598 		    IEEE80211_FC0_TYPE_DATA) {
2599 			freemsg(mp);
2600 		}
2601 		sc->sc_tx_nobuf++;
2602 		err = IWK_FAIL;
2603 		goto exit;
2604 	}
2605 	mutex_exit(&sc->sc_tx_lock);
2606 
2607 	hdrlen = sizeof (struct ieee80211_frame);
2608 
2609 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2610 	if (m == NULL) { /* can not alloc buf, drop this package */
2611 		cmn_err(CE_WARN,
2612 		    "iwk_send(): failed to allocate msgbuf\n");
2613 		freemsg(mp);
2614 		err = IWK_SUCCESS;
2615 		goto exit;
2616 	}
2617 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2618 		mblen = MBLKL(m0);
2619 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2620 		off += mblen;
2621 	}
2622 	m->b_wptr += off;
2623 	freemsg(mp);
2624 
2625 	wh = (struct ieee80211_frame *)m->b_rptr;
2626 
2627 	if (ic->ic_opmode == IEEE80211_M_IBSS &&
2628 	    (!(IEEE80211_IS_MULTICAST(wh->i_addr1)))) {
2629 		mutex_enter(&sc->sc_glock);
2630 		mutex_enter(&sc->sc_ibss.node_tb_lock);
2631 
2632 		/*
2633 		 * search for node in ibss node table
2634 		 */
2635 		for (index1 = IWK_STA_ID;
2636 		    index1 < IWK_STATION_COUNT; index1++) {
2637 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
2638 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
2639 			    ibss_node_tb[index1].node.bssid,
2640 			    wh->i_addr1)) {
2641 				break;
2642 			}
2643 		}
2644 
2645 		/*
2646 		 * if don't find in ibss node table
2647 		 */
2648 		if (index1 >= IWK_BROADCAST_ID) {
2649 			err = iwk_clean_add_node_ibss(ic,
2650 			    wh->i_addr1, &index2);
2651 			if (err != IWK_SUCCESS) {
2652 				cmn_err(CE_WARN, "iwk_send(): "
2653 				    "failed to clean all nodes "
2654 				    "and add one node\n");
2655 				mutex_exit(&sc->sc_ibss.node_tb_lock);
2656 				mutex_exit(&sc->sc_glock);
2657 				freemsg(m);
2658 				sc->sc_tx_err++;
2659 				err = IWK_SUCCESS;
2660 				goto exit;
2661 			}
2662 			index = index2;
2663 		} else {
2664 			index = index1;
2665 		}
2666 		mutex_exit(&sc->sc_ibss.node_tb_lock);
2667 		mutex_exit(&sc->sc_glock);
2668 	}
2669 
2670 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2671 	if (in == NULL) {
2672 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2673 		freemsg(m);
2674 		sc->sc_tx_err++;
2675 		err = IWK_SUCCESS;
2676 		goto exit;
2677 	}
2678 	(void) ieee80211_encap(ic, m, in);
2679 
2680 	cmd->hdr.type = REPLY_TX;
2681 	cmd->hdr.flags = 0;
2682 	cmd->hdr.qid = ring->qid;
2683 	cmd->hdr.idx = ring->cur;
2684 
2685 	tx = (iwk_tx_cmd_t *)cmd->data;
2686 	tx->tx_flags = 0;
2687 
2688 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2689 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2690 	} else {
2691 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2692 	}
2693 
2694 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2695 		k = ieee80211_crypto_encap(ic, m);
2696 		if (k == NULL) {
2697 			freemsg(m);
2698 			sc->sc_tx_err++;
2699 			err = IWK_SUCCESS;
2700 			goto exit;
2701 		}
2702 
2703 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2704 			tx->sec_ctl = 2; /* for CCMP */
2705 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2706 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2707 		}
2708 
2709 		/* packet header may have moved, reset our local pointer */
2710 		wh = (struct ieee80211_frame *)m->b_rptr;
2711 	}
2712 
2713 	len = msgdsize(m);
2714 
2715 #ifdef DEBUG
2716 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2717 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2718 #endif
2719 
2720 	/* pickup a rate */
2721 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2722 	    IEEE80211_FC0_TYPE_MGT) {
2723 		/* mgmt frames are sent at 1M */
2724 		rate = in->in_rates.ir_rates[0];
2725 	} else {
2726 		/*
2727 		 * do it here for the software way rate control.
2728 		 * later for rate scaling in hardware.
2729 		 * maybe like the following, for management frame:
2730 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2731 		 * for data frame:
2732 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2733 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2734 		 * tx->initial_rate_index = 1;
2735 		 *
2736 		 * now the txrate is determined in tx cmd flags, set to the
2737 		 * max value 54M for 11g and 11M for 11b.
2738 		 */
2739 
2740 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2741 			rate = ic->ic_fixed_rate;
2742 		} else {
2743 			rate = in->in_rates.ir_rates[in->in_txrate];
2744 		}
2745 	}
2746 	rate &= IEEE80211_RATE_VAL;
2747 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2748 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2749 
2750 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2751 
2752 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2753 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2754 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2755 
2756 	/* retrieve destination node's id */
2757 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2758 		tx->sta_id = IWK_BROADCAST_ID;
2759 	} else {
2760 		if (ic->ic_opmode == IEEE80211_M_IBSS)
2761 			tx->sta_id = index;
2762 		else
2763 			tx->sta_id = IWK_AP_ID;
2764 	}
2765 
2766 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2767 	    IEEE80211_FC0_TYPE_MGT) {
2768 		/* tell h/w to set timestamp in probe responses */
2769 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2770 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2771 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2772 
2773 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2774 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2775 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2776 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2777 			tx->timeout.pm_frame_timeout = 3;
2778 		else
2779 			tx->timeout.pm_frame_timeout = 2;
2780 	} else
2781 		tx->timeout.pm_frame_timeout = 0;
2782 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2783 		masks |= RATE_MCS_CCK_MSK;
2784 
2785 	masks |= RATE_MCS_ANT_B_MSK;
2786 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2787 
2788 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2789 	    tx->tx_flags));
2790 
2791 	tx->rts_retry_limit = 60;
2792 	tx->data_retry_limit = 15;
2793 
2794 	tx->stop_time.life_time  = LE_32(0xffffffff);
2795 
2796 	tx->len = LE_16(len);
2797 
2798 	tx->dram_lsb_ptr =
2799 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2800 	tx->dram_msb_ptr = 0;
2801 	tx->driver_txop = 0;
2802 	tx->next_frame_len = 0;
2803 
2804 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2805 	m->b_rptr += hdrlen;
2806 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2807 
2808 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2809 	    ring->qid, ring->cur, len));
2810 
2811 	/*
2812 	 * first segment includes the tx cmd plus the 802.11 header,
2813 	 * the second includes the remaining of the 802.11 frame.
2814 	 */
2815 	desc->val0 = LE_32(2 << 24);
2816 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2817 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2818 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2819 	desc->pa[0].val2 =
2820 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2821 	    ((len - hdrlen) << 20);
2822 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2823 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2824 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2825 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2826 
2827 	mutex_enter(&sc->sc_tx_lock);
2828 	ring->queued++;
2829 	mutex_exit(&sc->sc_tx_lock);
2830 
2831 	/* kick ring */
2832 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2833 	    tfd_offset[ring->cur].val = 8 + len;
2834 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2835 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2836 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2837 	}
2838 
2839 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2840 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2841 
2842 	ring->cur = (ring->cur + 1) % ring->count;
2843 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2844 	freemsg(m);
2845 	/* release node reference */
2846 	ieee80211_free_node(in);
2847 
2848 	ic->ic_stats.is_tx_bytes += len;
2849 	ic->ic_stats.is_tx_frags++;
2850 
2851 	if (sc->sc_tx_timer == 0)
2852 		sc->sc_tx_timer = 10;
2853 exit:
2854 	return (err);
2855 }
2856 
2857 static void
2858 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2859 {
2860 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2861 	ieee80211com_t	*ic = &sc->sc_ic;
2862 
2863 	enum ieee80211_opmode		oldmod;
2864 	iwk_tx_power_table_cmd_t	txpower;
2865 	iwk_add_sta_t			node;
2866 	iwk_link_quality_cmd_t		link_quality;
2867 	uint16_t			masks = 0;
2868 	int				i, err, err1;
2869 
2870 	oldmod = ic->ic_opmode;
2871 
2872 	mutex_enter(&sc->sc_glock);
2873 	if (sc->sc_flags & (IWK_F_SUSPEND | IWK_F_HW_ERR_RECOVER)) {
2874 		miocnak(wq, mp, 0, ENXIO);
2875 		mutex_exit(&sc->sc_glock);
2876 		return;
2877 	}
2878 	mutex_exit(&sc->sc_glock);
2879 
2880 	err = ieee80211_ioctl(ic, wq, mp);
2881 
2882 	/*
2883 	 * return to STA mode
2884 	 */
2885 	if ((0 == err || ENETRESET == err) && (oldmod != ic->ic_opmode) &&
2886 	    (ic->ic_opmode == IEEE80211_M_STA)) {
2887 		/* configure rxon */
2888 		(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
2889 		IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
2890 		IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
2891 		sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
2892 		sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
2893 		    RXON_FLG_AUTO_DETECT_MSK |
2894 		    RXON_FLG_BAND_24G_MSK);
2895 		sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
2896 		switch (ic->ic_opmode) {
2897 		case IEEE80211_M_STA:
2898 			sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
2899 			sc->sc_config.filter_flags |=
2900 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2901 			    RXON_FILTER_DIS_DECRYPT_MSK |
2902 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2903 			break;
2904 		case IEEE80211_M_IBSS:
2905 		case IEEE80211_M_AHDEMO:
2906 			sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
2907 			sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2908 			sc->sc_config.filter_flags =
2909 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2910 			    RXON_FILTER_DIS_DECRYPT_MSK |
2911 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2912 			break;
2913 		case IEEE80211_M_HOSTAP:
2914 			sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
2915 			break;
2916 		case IEEE80211_M_MONITOR:
2917 			sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
2918 			sc->sc_config.filter_flags |=
2919 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2920 			    RXON_FILTER_CTL2HOST_MSK |
2921 			    RXON_FILTER_PROMISC_MSK);
2922 			break;
2923 		}
2924 		sc->sc_config.cck_basic_rates  = 0x0f;
2925 		sc->sc_config.ofdm_basic_rates = 0xff;
2926 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
2927 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
2928 		/* set antenna */
2929 		mutex_enter(&sc->sc_glock);
2930 		sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
2931 		    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
2932 		    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
2933 		    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
2934 		err1 = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
2935 		    sizeof (iwk_rxon_cmd_t), 1);
2936 		if (err1 != IWK_SUCCESS) {
2937 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
2938 			    "failed to set configure command"
2939 			    " please run (ifconfig unplumb and"
2940 			    " ifconfig plumb)\n");
2941 		}
2942 		/*
2943 		 * set Tx power for 2.4GHz channels
2944 		 * (need further investigation. fix tx power at present)
2945 		 */
2946 		(void) memset(&txpower, 0, sizeof (txpower));
2947 		txpower.band = 1; /* for 2.4G */
2948 		txpower.channel = sc->sc_config.chan;
2949 		txpower.channel_normal_width = 0;
2950 		for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
2951 			txpower.tx_power.ht_ofdm_power[i].
2952 			    s.ramon_tx_gain = 0x3f3f;
2953 			txpower.tx_power.ht_ofdm_power[i].
2954 			    s.dsp_predis_atten = 110 | (110 << 8);
2955 		}
2956 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2957 		    s.ramon_tx_gain = 0x3f3f;
2958 		txpower.tx_power.ht_ofdm_power[POWER_TABLE_NUM_HT_OFDM_ENTRIES].
2959 		    s.dsp_predis_atten = 110 | (110 << 8);
2960 		err1 = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
2961 		    sizeof (txpower), 1);
2962 		if (err1 != IWK_SUCCESS) {
2963 			cmn_err(CE_WARN, "iwk_m_ioctl(): failed to set txpower"
2964 			    " please run (ifconfig unplumb "
2965 			    "and ifconfig plumb)\n");
2966 		}
2967 		/* add broadcast node so that we can send broadcast frame */
2968 		(void) memset(&node, 0, sizeof (node));
2969 		(void) memset(node.bssid, 0xff, 6);
2970 		node.id = IWK_BROADCAST_ID;
2971 		err1 = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
2972 		if (err1 != IWK_SUCCESS) {
2973 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
2974 			    "failed to add broadcast node\n");
2975 		}
2976 
2977 		/* TX_LINK_QUALITY cmd */
2978 		(void) memset(&link_quality, 0, sizeof (link_quality));
2979 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2980 			masks |= RATE_MCS_CCK_MSK;
2981 			masks |= RATE_MCS_ANT_B_MSK;
2982 			masks &= ~RATE_MCS_ANT_A_MSK;
2983 			link_quality.rate_n_flags[i] =
2984 			    iwk_rate_to_plcp(2) | masks;
2985 		}
2986 		link_quality.general_params.single_stream_ant_msk = 2;
2987 		link_quality.general_params.dual_stream_ant_msk = 3;
2988 		link_quality.agg_params.agg_dis_start_th = 3;
2989 		link_quality.agg_params.agg_time_limit = LE_16(4000);
2990 		link_quality.sta_id = IWK_BROADCAST_ID;
2991 		err1 = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
2992 		    sizeof (link_quality), 1);
2993 		if (err1 != IWK_SUCCESS) {
2994 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
2995 			    "failed to config link quality table\n");
2996 		}
2997 		mutex_exit(&sc->sc_glock);
2998 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2999 	}
3000 
3001 	if (err == ENETRESET) {
3002 		/*
3003 		 * This is special for the hidden AP connection.
3004 		 * In any case, we should make sure only one 'scan'
3005 		 * in the driver for a 'connect' CLI command. So
3006 		 * when connecting to a hidden AP, the scan is just
3007 		 * sent out to the air when we know the desired
3008 		 * essid of the AP we want to connect.
3009 		 */
3010 		if (ic->ic_des_esslen) {
3011 			if (sc->sc_flags & IWK_F_RUNNING) {
3012 				iwk_m_stop(sc);
3013 				(void) iwk_m_start(sc);
3014 				(void) ieee80211_new_state(ic,
3015 				    IEEE80211_S_SCAN, -1);
3016 			}
3017 		}
3018 	}
3019 }
3020 
3021 /*
3022  * callback functions for set/get properties
3023  */
3024 /* ARGSUSED */
3025 static int
3026 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3027     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
3028 {
3029 	int		err = 0;
3030 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3031 
3032 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3033 	    pr_flags, wldp_length, wldp_buf, perm);
3034 
3035 	return (err);
3036 }
3037 static int
3038 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3039     uint_t wldp_length, const void *wldp_buf)
3040 {
3041 	int		err;
3042 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3043 	ieee80211com_t	*ic = &sc->sc_ic;
3044 
3045 	mutex_enter(&sc->sc_glock);
3046 	if (sc->sc_flags & (IWK_F_SUSPEND | IWK_F_HW_ERR_RECOVER)) {
3047 		mutex_exit(&sc->sc_glock);
3048 		return (ENXIO);
3049 	}
3050 	mutex_exit(&sc->sc_glock);
3051 
3052 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3053 	    wldp_buf);
3054 
3055 	if (err == ENETRESET) {
3056 		if (ic->ic_des_esslen) {
3057 			if (sc->sc_flags & IWK_F_RUNNING) {
3058 				iwk_m_stop(sc);
3059 				(void) iwk_m_start(sc);
3060 				(void) ieee80211_new_state(ic,
3061 				    IEEE80211_S_SCAN, -1);
3062 			}
3063 		}
3064 		err = 0;
3065 	}
3066 
3067 	return (err);
3068 }
3069 
3070 /*ARGSUSED*/
3071 static int
3072 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
3073 {
3074 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
3075 	ieee80211com_t	*ic = &sc->sc_ic;
3076 	ieee80211_node_t *in;
3077 
3078 	mutex_enter(&sc->sc_glock);
3079 	switch (stat) {
3080 	case MAC_STAT_IFSPEED:
3081 		in = ic->ic_bss;
3082 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
3083 		    IEEE80211_RATE(in->in_txrate) :
3084 		    ic->ic_fixed_rate) / 2 * 1000000;
3085 		break;
3086 	case MAC_STAT_NOXMTBUF:
3087 		*val = sc->sc_tx_nobuf;
3088 		break;
3089 	case MAC_STAT_NORCVBUF:
3090 		*val = sc->sc_rx_nobuf;
3091 		break;
3092 	case MAC_STAT_IERRORS:
3093 		*val = sc->sc_rx_err;
3094 		break;
3095 	case MAC_STAT_RBYTES:
3096 		*val = ic->ic_stats.is_rx_bytes;
3097 		break;
3098 	case MAC_STAT_IPACKETS:
3099 		*val = ic->ic_stats.is_rx_frags;
3100 		break;
3101 	case MAC_STAT_OBYTES:
3102 		*val = ic->ic_stats.is_tx_bytes;
3103 		break;
3104 	case MAC_STAT_OPACKETS:
3105 		*val = ic->ic_stats.is_tx_frags;
3106 		break;
3107 	case MAC_STAT_OERRORS:
3108 	case WIFI_STAT_TX_FAILED:
3109 		*val = sc->sc_tx_err;
3110 		break;
3111 	case WIFI_STAT_TX_RETRANS:
3112 		*val = sc->sc_tx_retries;
3113 		break;
3114 	case WIFI_STAT_FCS_ERRORS:
3115 	case WIFI_STAT_WEP_ERRORS:
3116 	case WIFI_STAT_TX_FRAGS:
3117 	case WIFI_STAT_MCAST_TX:
3118 	case WIFI_STAT_RTS_SUCCESS:
3119 	case WIFI_STAT_RTS_FAILURE:
3120 	case WIFI_STAT_ACK_FAILURE:
3121 	case WIFI_STAT_RX_FRAGS:
3122 	case WIFI_STAT_MCAST_RX:
3123 	case WIFI_STAT_RX_DUPS:
3124 		mutex_exit(&sc->sc_glock);
3125 		return (ieee80211_stat(ic, stat, val));
3126 	default:
3127 		mutex_exit(&sc->sc_glock);
3128 		return (ENOTSUP);
3129 	}
3130 	mutex_exit(&sc->sc_glock);
3131 
3132 	return (IWK_SUCCESS);
3133 
3134 }
3135 
3136 static int
3137 iwk_m_start(void *arg)
3138 {
3139 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3140 	ieee80211com_t	*ic = &sc->sc_ic;
3141 	int err;
3142 
3143 	err = iwk_init(sc);
3144 
3145 	if (err != IWK_SUCCESS) {
3146 		/*
3147 		 * The hw init err(eg. RF is OFF). Return Success to make
3148 		 * the 'plumb' succeed. The iwk_thread() tries to re-init
3149 		 * background.
3150 		 */
3151 		cmn_err(CE_WARN, "iwk_m_start(): failed to initialize "
3152 		    "hardware\n");
3153 		mutex_enter(&sc->sc_glock);
3154 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3155 		mutex_exit(&sc->sc_glock);
3156 		return (IWK_SUCCESS);
3157 	}
3158 
3159 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3160 
3161 	mutex_enter(&sc->sc_glock);
3162 	sc->sc_flags |= IWK_F_RUNNING;
3163 	mutex_exit(&sc->sc_glock);
3164 
3165 	return (IWK_SUCCESS);
3166 }
3167 
3168 static void
3169 iwk_m_stop(void *arg)
3170 {
3171 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3172 	ieee80211com_t	*ic = &sc->sc_ic;
3173 
3174 	iwk_stop(sc);
3175 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3176 	mutex_enter(&sc->sc_mt_lock);
3177 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3178 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
3179 	mutex_exit(&sc->sc_mt_lock);
3180 	mutex_enter(&sc->sc_glock);
3181 	sc->sc_flags &= ~IWK_F_RUNNING;
3182 	sc->sc_flags &= ~IWK_F_SCANNING;
3183 	mutex_exit(&sc->sc_glock);
3184 }
3185 
3186 /*ARGSUSED*/
3187 static int
3188 iwk_m_unicst(void *arg, const uint8_t *macaddr)
3189 {
3190 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3191 	ieee80211com_t	*ic = &sc->sc_ic;
3192 	int err;
3193 
3194 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3195 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3196 		mutex_enter(&sc->sc_glock);
3197 		err = iwk_config(sc);
3198 		mutex_exit(&sc->sc_glock);
3199 		if (err != IWK_SUCCESS) {
3200 			cmn_err(CE_WARN,
3201 			    "iwk_m_unicst(): "
3202 			    "failed to configure device\n");
3203 			goto fail;
3204 		}
3205 	}
3206 	return (IWK_SUCCESS);
3207 fail:
3208 	return (err);
3209 }
3210 
3211 /*ARGSUSED*/
3212 static int
3213 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3214 {
3215 	return (IWK_SUCCESS);
3216 }
3217 
3218 /*ARGSUSED*/
3219 static int
3220 iwk_m_promisc(void *arg, boolean_t on)
3221 {
3222 	return (IWK_SUCCESS);
3223 }
3224 
3225 static void
3226 iwk_thread(iwk_sc_t *sc)
3227 {
3228 	ieee80211com_t	*ic = &sc->sc_ic;
3229 	clock_t clk;
3230 	int times = 0, err, n = 0, timeout = 0;
3231 	uint32_t tmp;
3232 
3233 	mutex_enter(&sc->sc_mt_lock);
3234 	while (sc->sc_mf_thread_switch) {
3235 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
3236 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3237 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
3238 		} else {
3239 			sc->sc_flags |= IWK_F_RADIO_OFF;
3240 		}
3241 		/*
3242 		 * If in SUSPEND or the RF is OFF, do nothing
3243 		 */
3244 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
3245 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
3246 			mutex_exit(&sc->sc_mt_lock);
3247 			delay(drv_usectohz(100000));
3248 			mutex_enter(&sc->sc_mt_lock);
3249 			continue;
3250 		}
3251 
3252 		/*
3253 		 * recovery fatal error
3254 		 */
3255 		if (ic->ic_mach &&
3256 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
3257 
3258 			IWK_DBG((IWK_DEBUG_FW,
3259 			    "iwk_thread(): "
3260 			    "try to recover fatal hw error: %d\n", times++));
3261 
3262 			iwk_stop(sc);
3263 
3264 			mutex_exit(&sc->sc_mt_lock);
3265 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3266 			delay(drv_usectohz(2000000 + n*500000));
3267 			mutex_enter(&sc->sc_mt_lock);
3268 
3269 			err = iwk_init(sc);
3270 			if (err != IWK_SUCCESS) {
3271 				n++;
3272 				if (n < 20)
3273 					continue;
3274 			}
3275 			n = 0;
3276 			if (!err)
3277 				sc->sc_flags |= IWK_F_RUNNING;
3278 			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3279 			mutex_exit(&sc->sc_mt_lock);
3280 			delay(drv_usectohz(2000000));
3281 			if (sc->sc_ostate != IEEE80211_S_INIT)
3282 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
3283 			mutex_enter(&sc->sc_mt_lock);
3284 		}
3285 
3286 		if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) {
3287 			IWK_DBG((IWK_DEBUG_RESUME,
3288 			    "iwk_thread(): "
3289 			    "lazy resume\n"));
3290 			sc->sc_flags &= ~IWK_F_LAZY_RESUME;
3291 			mutex_exit(&sc->sc_mt_lock);
3292 			/*
3293 			 * NB: under WPA mode, this call hangs (door problem?)
3294 			 * when called in iwk_attach() and iwk_detach() while
3295 			 * system is in the procedure of CPR. To be safe, let
3296 			 * the thread do this.
3297 			 */
3298 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3299 			mutex_enter(&sc->sc_mt_lock);
3300 		}
3301 
3302 		if (ic->ic_mach &&
3303 		    (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3304 			IWK_DBG((IWK_DEBUG_SCAN,
3305 			    "iwk_thread(): "
3306 			    "wait for probe response\n"));
3307 			sc->sc_scan_pending--;
3308 			mutex_exit(&sc->sc_mt_lock);
3309 			delay(drv_usectohz(200000));
3310 			if (sc->sc_flags & IWK_F_SCANNING)
3311 				ieee80211_next_scan(ic);
3312 			mutex_enter(&sc->sc_mt_lock);
3313 		}
3314 
3315 		/*
3316 		 * rate ctl
3317 		 */
3318 		if (ic->ic_mach &&
3319 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3320 			clk = ddi_get_lbolt();
3321 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
3322 				iwk_amrr_timeout(sc);
3323 			}
3324 		}
3325 
3326 		mutex_exit(&sc->sc_mt_lock);
3327 		delay(drv_usectohz(100000));
3328 		mutex_enter(&sc->sc_mt_lock);
3329 
3330 		if (sc->sc_tx_timer) {
3331 			timeout++;
3332 			if (timeout == 10) {
3333 				sc->sc_tx_timer--;
3334 				if (sc->sc_tx_timer == 0) {
3335 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3336 					sc->sc_ostate = IEEE80211_S_RUN;
3337 					IWK_DBG((IWK_DEBUG_FW,
3338 					    "iwk_thread(): try to recover from"
3339 					    " 'send fail\n"));
3340 				}
3341 				timeout = 0;
3342 			}
3343 		}
3344 
3345 	}
3346 	sc->sc_mf_thread = NULL;
3347 	cv_signal(&sc->sc_mt_cv);
3348 	mutex_exit(&sc->sc_mt_lock);
3349 }
3350 
3351 
3352 /*
3353  * Send a command to the firmware.
3354  */
3355 static int
3356 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3357 {
3358 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3359 	iwk_tx_desc_t *desc;
3360 	iwk_cmd_t *cmd;
3361 	clock_t clk;
3362 
3363 	ASSERT(size <= sizeof (cmd->data));
3364 	ASSERT(mutex_owned(&sc->sc_glock));
3365 
3366 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3367 	desc = ring->data[ring->cur].desc;
3368 	cmd = ring->data[ring->cur].cmd;
3369 
3370 	cmd->hdr.type = (uint8_t)code;
3371 	cmd->hdr.flags = 0;
3372 	cmd->hdr.qid = ring->qid;
3373 	cmd->hdr.idx = ring->cur;
3374 	(void) memcpy(cmd->data, buf, size);
3375 	(void) memset(desc, 0, sizeof (*desc));
3376 
3377 	desc->val0 = LE_32(1 << 24);
3378 	desc->pa[0].tb1_addr =
3379 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3380 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3381 
3382 	/* kick cmd ring XXX */
3383 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3384 	    tfd_offset[ring->cur].val = 8;
3385 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3386 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3387 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3388 	}
3389 	ring->cur = (ring->cur + 1) % ring->count;
3390 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3391 
3392 	if (async)
3393 		return (IWK_SUCCESS);
3394 	else {
3395 		sc->sc_flags &= ~IWK_F_CMD_DONE;
3396 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3397 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3398 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3399 			    0)
3400 				break;
3401 		}
3402 		if (sc->sc_flags & IWK_F_CMD_DONE)
3403 			return (IWK_SUCCESS);
3404 		else
3405 			return (IWK_FAIL);
3406 	}
3407 }
3408 
3409 static void
3410 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3411 {
3412 	iwk_led_cmd_t led;
3413 
3414 	led.interval = LE_32(100000);	/* unit: 100ms */
3415 	led.id = id;
3416 	led.off = off;
3417 	led.on = on;
3418 
3419 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3420 }
3421 
3422 static int
3423 iwk_hw_set_before_auth(iwk_sc_t *sc)
3424 {
3425 	ieee80211com_t *ic = &sc->sc_ic;
3426 	ieee80211_node_t *in = ic->ic_bss;
3427 	iwk_add_sta_t node;
3428 	iwk_link_quality_cmd_t link_quality;
3429 	struct ieee80211_rateset rs;
3430 	uint16_t masks = 0, rate;
3431 	int i, err;
3432 
3433 	/* update adapter's configuration according the info of target AP */
3434 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3435 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
3436 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
3437 		sc->sc_config.cck_basic_rates  = 0x03;
3438 		sc->sc_config.ofdm_basic_rates = 0;
3439 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3440 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3441 		sc->sc_config.cck_basic_rates  = 0;
3442 		sc->sc_config.ofdm_basic_rates = 0x15;
3443 	} else { /* assume 802.11b/g */
3444 		sc->sc_config.cck_basic_rates  = 0x0f;
3445 		sc->sc_config.ofdm_basic_rates = 0xff;
3446 	}
3447 
3448 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3449 	    RXON_FLG_SHORT_SLOT_MSK);
3450 
3451 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3452 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3453 	else
3454 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3455 
3456 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3457 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3458 	else
3459 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3460 
3461 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3462 	    "filter_flags %x  cck %x ofdm %x"
3463 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3464 	    sc->sc_config.chan, sc->sc_config.flags,
3465 	    sc->sc_config.filter_flags,
3466 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3467 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3468 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3469 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3470 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3471 	    sizeof (iwk_rxon_cmd_t), 1);
3472 	if (err != IWK_SUCCESS) {
3473 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3474 		    " failed to config chan%d\n",
3475 		    sc->sc_config.chan);
3476 		return (err);
3477 	}
3478 
3479 	/* obtain current temperature of chipset */
3480 	sc->sc_tempera = iwk_curr_tempera(sc);
3481 
3482 	/* make Tx power calibration to determine the gains of DSP and radio */
3483 	err = iwk_tx_power_calibration(sc);
3484 	if (err) {
3485 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3486 		    "failed to set tx power table\n");
3487 		return (err);
3488 	}
3489 
3490 	/* add default AP node */
3491 	(void) memset(&node, 0, sizeof (node));
3492 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3493 	node.id = IWK_AP_ID;
3494 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3495 	if (err != IWK_SUCCESS) {
3496 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3497 		    "failed to add BSS node\n");
3498 		return (err);
3499 	}
3500 
3501 	/* TX_LINK_QUALITY cmd */
3502 	(void) memset(&link_quality, 0, sizeof (link_quality));
3503 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3504 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3505 		if (i < rs.ir_nrates)
3506 			rate = rs.ir_rates[rs.ir_nrates - i];
3507 		else
3508 			rate = 2;
3509 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3510 			masks |= RATE_MCS_CCK_MSK;
3511 		masks |= RATE_MCS_ANT_B_MSK;
3512 		masks &= ~RATE_MCS_ANT_A_MSK;
3513 		link_quality.rate_n_flags[i] =
3514 		    iwk_rate_to_plcp(rate) | masks;
3515 	}
3516 
3517 	link_quality.general_params.single_stream_ant_msk = 2;
3518 	link_quality.general_params.dual_stream_ant_msk = 3;
3519 	link_quality.agg_params.agg_dis_start_th = 3;
3520 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3521 	link_quality.sta_id = IWK_AP_ID;
3522 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3523 	    sizeof (link_quality), 1);
3524 	if (err != IWK_SUCCESS) {
3525 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3526 		    "failed to config link quality table\n");
3527 		return (err);
3528 	}
3529 
3530 	return (IWK_SUCCESS);
3531 }
3532 
3533 /*
3534  * Send a scan request(assembly scan cmd) to the firmware.
3535  */
3536 static int
3537 iwk_scan(iwk_sc_t *sc)
3538 {
3539 	ieee80211com_t *ic = &sc->sc_ic;
3540 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3541 	iwk_tx_desc_t *desc;
3542 	iwk_tx_data_t *data;
3543 	iwk_cmd_t *cmd;
3544 	iwk_scan_hdr_t *hdr;
3545 	iwk_scan_chan_t *chan;
3546 	struct ieee80211_frame *wh;
3547 	ieee80211_node_t *in = ic->ic_bss;
3548 	uint8_t essid[IEEE80211_NWID_LEN+1];
3549 	struct ieee80211_rateset *rs;
3550 	enum ieee80211_phymode mode;
3551 	uint8_t *frm;
3552 	int i, pktlen, nrates;
3553 
3554 	data = &ring->data[ring->cur];
3555 	desc = data->desc;
3556 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3557 
3558 	cmd->hdr.type = REPLY_SCAN_CMD;
3559 	cmd->hdr.flags = 0;
3560 	cmd->hdr.qid = ring->qid;
3561 	cmd->hdr.idx = ring->cur | 0x40;
3562 
3563 	hdr = (iwk_scan_hdr_t *)cmd->data;
3564 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3565 	hdr->nchan = 1;
3566 	hdr->quiet_time = LE_16(50);
3567 	hdr->quiet_plcp_th = LE_16(1);
3568 
3569 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3570 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3571 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3572 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3573 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3574 
3575 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3576 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3577 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3578 	hdr->tx_cmd.tx_flags |= (0x200);
3579 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3580 	hdr->tx_cmd.rate.r.rate_n_flags |=
3581 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3582 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3583 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3584 
3585 	if (ic->ic_des_esslen) {
3586 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3587 		essid[ic->ic_des_esslen] = '\0';
3588 		IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3589 
3590 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3591 		    ic->ic_des_esslen);
3592 	} else {
3593 		bzero(hdr->direct_scan[0].ssid,
3594 		    sizeof (hdr->direct_scan[0].ssid));
3595 	}
3596 	/*
3597 	 * a probe request frame is required after the REPLY_SCAN_CMD
3598 	 */
3599 	wh = (struct ieee80211_frame *)(hdr + 1);
3600 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3601 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3602 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3603 	(void) memset(wh->i_addr1, 0xff, 6);
3604 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3605 	(void) memset(wh->i_addr3, 0xff, 6);
3606 	*(uint16_t *)&wh->i_dur[0] = 0;
3607 	*(uint16_t *)&wh->i_seq[0] = 0;
3608 
3609 	frm = (uint8_t *)(wh + 1);
3610 
3611 	/* essid IE */
3612 	if (in->in_esslen) {
3613 		bcopy(in->in_essid, essid, in->in_esslen);
3614 		essid[in->in_esslen] = '\0';
3615 		IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3616 		    essid));
3617 	}
3618 	*frm++ = IEEE80211_ELEMID_SSID;
3619 	*frm++ = in->in_esslen;
3620 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3621 	frm += in->in_esslen;
3622 
3623 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3624 	rs = &ic->ic_sup_rates[mode];
3625 
3626 	/* supported rates IE */
3627 	*frm++ = IEEE80211_ELEMID_RATES;
3628 	nrates = rs->ir_nrates;
3629 	if (nrates > IEEE80211_RATE_SIZE)
3630 		nrates = IEEE80211_RATE_SIZE;
3631 	*frm++ = (uint8_t)nrates;
3632 	(void) memcpy(frm, rs->ir_rates, nrates);
3633 	frm += nrates;
3634 
3635 	/* supported xrates IE */
3636 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3637 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3638 		*frm++ = IEEE80211_ELEMID_XRATES;
3639 		*frm++ = (uint8_t)nrates;
3640 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3641 		frm += nrates;
3642 	}
3643 
3644 	/* optionnal IE (usually for wpa) */
3645 	if (ic->ic_opt_ie != NULL) {
3646 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3647 		frm += ic->ic_opt_ie_len;
3648 	}
3649 
3650 	/* setup length of probe request */
3651 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3652 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3653 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3654 
3655 	/*
3656 	 * the attribute of the scan channels are required after the probe
3657 	 * request frame.
3658 	 */
3659 	chan = (iwk_scan_chan_t *)frm;
3660 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3661 		if (ic->ic_des_esslen) {
3662 			chan->type = 3;
3663 		} else {
3664 			chan->type = 1;
3665 		}
3666 
3667 		chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3668 		chan->tpc.tx_gain = 0x3f;
3669 		chan->tpc.dsp_atten = 110;
3670 		chan->active_dwell = LE_16(50);
3671 		chan->passive_dwell = LE_16(120);
3672 
3673 		frm += sizeof (iwk_scan_chan_t);
3674 	}
3675 
3676 	pktlen = _PTRDIFF(frm, cmd);
3677 
3678 	(void) memset(desc, 0, sizeof (*desc));
3679 	desc->val0 = LE_32(1 << 24);
3680 	desc->pa[0].tb1_addr =
3681 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3682 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3683 
3684 	/*
3685 	 * maybe for cmd, filling the byte cnt table is not necessary.
3686 	 * anyway, we fill it here.
3687 	 */
3688 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3689 	    tfd_offset[ring->cur].val = 8;
3690 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3691 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3692 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3693 	}
3694 
3695 	/* kick cmd ring */
3696 	ring->cur = (ring->cur + 1) % ring->count;
3697 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3698 
3699 	return (IWK_SUCCESS);
3700 }
3701 
3702 static int
3703 iwk_config(iwk_sc_t *sc)
3704 {
3705 	ieee80211com_t *ic = &sc->sc_ic;
3706 	iwk_powertable_cmd_t powertable;
3707 	iwk_bt_cmd_t bt;
3708 	iwk_add_sta_t node;
3709 	iwk_link_quality_cmd_t link_quality;
3710 	int i, err;
3711 	uint16_t masks = 0;
3712 
3713 	/*
3714 	 * set power mode. Disable power management at present, do it later
3715 	 */
3716 	(void) memset(&powertable, 0, sizeof (powertable));
3717 	powertable.flags = LE_16(0x8);
3718 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3719 	    sizeof (powertable), 0);
3720 	if (err != IWK_SUCCESS) {
3721 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3722 		return (err);
3723 	}
3724 
3725 	/* configure bt coexistence */
3726 	(void) memset(&bt, 0, sizeof (bt));
3727 	bt.flags = 3;
3728 	bt.lead_time = 0xaa;
3729 	bt.max_kill = 1;
3730 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3731 	    sizeof (bt), 0);
3732 	if (err != IWK_SUCCESS) {
3733 		cmn_err(CE_WARN,
3734 		    "iwk_config(): "
3735 		    "failed to configurate bt coexistence\n");
3736 		return (err);
3737 	}
3738 
3739 	/* configure rxon */
3740 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3741 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3742 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3743 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3744 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3745 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3746 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3747 	switch (ic->ic_opmode) {
3748 	case IEEE80211_M_STA:
3749 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3750 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3751 		    RXON_FILTER_DIS_DECRYPT_MSK |
3752 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3753 		break;
3754 	case IEEE80211_M_IBSS:
3755 	case IEEE80211_M_AHDEMO:
3756 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3757 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3758 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3759 		    RXON_FILTER_DIS_DECRYPT_MSK |
3760 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3761 		break;
3762 	case IEEE80211_M_HOSTAP:
3763 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3764 		break;
3765 	case IEEE80211_M_MONITOR:
3766 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3767 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3768 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3769 		break;
3770 	}
3771 	sc->sc_config.cck_basic_rates  = 0x0f;
3772 	sc->sc_config.ofdm_basic_rates = 0xff;
3773 
3774 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3775 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3776 
3777 	/* set antenna */
3778 
3779 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3780 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3781 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3782 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3783 
3784 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3785 	    sizeof (iwk_rxon_cmd_t), 0);
3786 	if (err != IWK_SUCCESS) {
3787 		cmn_err(CE_WARN, "iwk_config(): "
3788 		    "failed to set configure command\n");
3789 		return (err);
3790 	}
3791 	/* obtain current temperature of chipset */
3792 	sc->sc_tempera = iwk_curr_tempera(sc);
3793 
3794 	/* make Tx power calibration to determine the gains of DSP and radio */
3795 	err = iwk_tx_power_calibration(sc);
3796 	if (err) {
3797 		cmn_err(CE_WARN, "iwk_config(): "
3798 		    "failed to set tx power table\n");
3799 		return (err);
3800 	}
3801 
3802 	/* add broadcast node so that we can send broadcast frame */
3803 	(void) memset(&node, 0, sizeof (node));
3804 	(void) memset(node.bssid, 0xff, 6);
3805 	node.id = IWK_BROADCAST_ID;
3806 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3807 	if (err != IWK_SUCCESS) {
3808 		cmn_err(CE_WARN, "iwk_config(): "
3809 		    "failed to add broadcast node\n");
3810 		return (err);
3811 	}
3812 
3813 	/* TX_LINK_QUALITY cmd ? */
3814 	(void) memset(&link_quality, 0, sizeof (link_quality));
3815 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3816 		masks |= RATE_MCS_CCK_MSK;
3817 		masks |= RATE_MCS_ANT_B_MSK;
3818 		masks &= ~RATE_MCS_ANT_A_MSK;
3819 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3820 	}
3821 
3822 	link_quality.general_params.single_stream_ant_msk = 2;
3823 	link_quality.general_params.dual_stream_ant_msk = 3;
3824 	link_quality.agg_params.agg_dis_start_th = 3;
3825 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3826 	link_quality.sta_id = IWK_BROADCAST_ID;
3827 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3828 	    sizeof (link_quality), 0);
3829 	if (err != IWK_SUCCESS) {
3830 		cmn_err(CE_WARN, "iwk_config(): "
3831 		    "failed to config link quality table\n");
3832 		return (err);
3833 	}
3834 
3835 	return (IWK_SUCCESS);
3836 }
3837 
3838 static void
3839 iwk_stop_master(iwk_sc_t *sc)
3840 {
3841 	uint32_t tmp;
3842 	int n;
3843 
3844 	tmp = IWK_READ(sc, CSR_RESET);
3845 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3846 
3847 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3848 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3849 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3850 		return;
3851 
3852 	for (n = 0; n < 2000; n++) {
3853 		if (IWK_READ(sc, CSR_RESET) &
3854 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3855 			break;
3856 		DELAY(1000);
3857 	}
3858 	if (n == 2000)
3859 		IWK_DBG((IWK_DEBUG_HW,
3860 		    "timeout waiting for master stop\n"));
3861 }
3862 
3863 static int
3864 iwk_power_up(iwk_sc_t *sc)
3865 {
3866 	uint32_t tmp;
3867 
3868 	iwk_mac_access_enter(sc);
3869 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3870 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3871 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3872 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3873 	iwk_mac_access_exit(sc);
3874 
3875 	DELAY(5000);
3876 	return (IWK_SUCCESS);
3877 }
3878 
3879 static int
3880 iwk_preinit(iwk_sc_t *sc)
3881 {
3882 	uint32_t tmp;
3883 	int n;
3884 	uint8_t vlink;
3885 
3886 	/* clear any pending interrupts */
3887 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3888 
3889 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3890 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3891 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3892 
3893 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3894 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3895 
3896 	/* wait for clock ready */
3897 	for (n = 0; n < 1000; n++) {
3898 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3899 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3900 			break;
3901 		DELAY(10);
3902 	}
3903 	if (n == 1000) {
3904 		cmn_err(CE_WARN,
3905 		    "iwk_preinit(): timeout waiting for clock ready\n");
3906 		return (ETIMEDOUT);
3907 	}
3908 	iwk_mac_access_enter(sc);
3909 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3910 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3911 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3912 
3913 	DELAY(20);
3914 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3915 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3916 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3917 	iwk_mac_access_exit(sc);
3918 
3919 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3920 
3921 	(void) iwk_power_up(sc);
3922 
3923 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3924 		tmp = ddi_get32(sc->sc_cfg_handle,
3925 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3926 		ddi_put32(sc->sc_cfg_handle,
3927 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3928 		    tmp & ~(1 << 11));
3929 	}
3930 
3931 
3932 	vlink = ddi_get8(sc->sc_cfg_handle,
3933 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3934 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3935 	    vlink & ~2);
3936 
3937 	tmp = IWK_READ(sc, CSR_SW_VER);
3938 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3939 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3940 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3941 	IWK_WRITE(sc, CSR_SW_VER, tmp);
3942 
3943 	/* make sure power supply on each part of the hardware */
3944 	iwk_mac_access_enter(sc);
3945 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3946 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3947 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3948 	DELAY(5);
3949 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3950 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3951 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3952 	iwk_mac_access_exit(sc);
3953 	return (IWK_SUCCESS);
3954 }
3955 
3956 /*
3957  * set up semphore flag to own EEPROM
3958  */
3959 static int iwk_eep_sem_down(iwk_sc_t *sc)
3960 {
3961 	int count1, count2;
3962 	uint32_t tmp;
3963 
3964 	for (count1 = 0; count1 < 1000; count1++) {
3965 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3966 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3967 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3968 
3969 		for (count2 = 0; count2 < 2; count2++) {
3970 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3971 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
3972 				return (IWK_SUCCESS);
3973 			DELAY(10000);
3974 		}
3975 	}
3976 	return (IWK_FAIL);
3977 }
3978 
3979 /*
3980  * reset semphore flag to release EEPROM
3981  */
3982 static void iwk_eep_sem_up(iwk_sc_t *sc)
3983 {
3984 	uint32_t tmp;
3985 
3986 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3987 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3988 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3989 }
3990 
3991 /*
3992  * This function load all infomation in eeprom into iwk_eep
3993  * structure in iwk_sc_t structure
3994  */
3995 static int iwk_eep_load(iwk_sc_t *sc)
3996 {
3997 	int i, rr;
3998 	uint32_t rv, tmp, eep_gp;
3999 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4000 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4001 
4002 	/* read eeprom gp register in CSR */
4003 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
4004 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4005 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
4006 		cmn_err(CE_WARN, "EEPROM not found\n");
4007 		return (IWK_FAIL);
4008 	}
4009 
4010 	rr = iwk_eep_sem_down(sc);
4011 	if (rr != 0) {
4012 		cmn_err(CE_WARN, "failed to own EEPROM\n");
4013 		return (IWK_FAIL);
4014 	}
4015 
4016 	for (addr = 0; addr < eep_sz; addr += 2) {
4017 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4018 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
4019 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4020 
4021 		for (i = 0; i < 10; i++) {
4022 			rv = IWK_READ(sc, CSR_EEPROM_REG);
4023 			if (rv & 1)
4024 				break;
4025 			DELAY(10);
4026 		}
4027 
4028 		if (!(rv & 1)) {
4029 			cmn_err(CE_WARN, "time out when read EEPROM\n");
4030 			iwk_eep_sem_up(sc);
4031 			return (IWK_FAIL);
4032 		}
4033 
4034 		eep_p[addr/2] = rv >> 16;
4035 	}
4036 
4037 	iwk_eep_sem_up(sc);
4038 	return (IWK_SUCCESS);
4039 }
4040 
4041 /*
4042  * init mac address in ieee80211com_t struct
4043  */
4044 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
4045 {
4046 	ieee80211com_t *ic = &sc->sc_ic;
4047 	struct iwk_eep *ep = &sc->sc_eep_map;
4048 
4049 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
4050 
4051 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4052 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4053 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4054 }
4055 
4056 static int
4057 iwk_init(iwk_sc_t *sc)
4058 {
4059 	int qid, n, err;
4060 	clock_t clk;
4061 	uint32_t tmp;
4062 
4063 	mutex_enter(&sc->sc_glock);
4064 	sc->sc_flags &= ~IWK_F_FW_INIT;
4065 
4066 	(void) iwk_preinit(sc);
4067 
4068 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
4069 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
4070 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
4071 		goto fail1;
4072 	}
4073 
4074 	/* init Rx ring */
4075 	iwk_mac_access_enter(sc);
4076 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
4077 
4078 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
4079 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
4080 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
4081 
4082 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
4083 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
4084 	    offsetof(struct iwk_shared, val0)) >> 4));
4085 
4086 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
4087 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
4088 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
4089 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
4090 	    (RX_QUEUE_SIZE_LOG <<
4091 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
4092 	iwk_mac_access_exit(sc);
4093 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
4094 	    (RX_QUEUE_SIZE - 1) & ~0x7);
4095 
4096 	/* init Tx rings */
4097 	iwk_mac_access_enter(sc);
4098 	iwk_reg_write(sc, SCD_TXFACT, 0);
4099 
4100 	/* keep warm page */
4101 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
4102 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
4103 
4104 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
4105 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
4106 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
4107 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
4108 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4109 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
4110 	}
4111 	iwk_mac_access_exit(sc);
4112 
4113 	/* clear "radio off" and "disable command" bits */
4114 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4115 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
4116 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4117 
4118 	/* clear any pending interrupts */
4119 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
4120 
4121 	/* enable interrupts */
4122 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
4123 
4124 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4125 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4126 
4127 	/*
4128 	 * backup ucode data part for future use.
4129 	 */
4130 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4131 	    sc->sc_dma_fw_data.mem_va,
4132 	    sc->sc_dma_fw_data.alength);
4133 
4134 	for (n = 0; n < 2; n++) {
4135 		/* load firmware init segment into NIC */
4136 		err = iwk_load_firmware(sc);
4137 		if (err != IWK_SUCCESS) {
4138 			cmn_err(CE_WARN, "iwk_init(): "
4139 			    "failed to setup boot firmware\n");
4140 			continue;
4141 		}
4142 
4143 		/* now press "execute" start running */
4144 		IWK_WRITE(sc, CSR_RESET, 0);
4145 		break;
4146 	}
4147 	if (n == 2) {
4148 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
4149 		goto fail1;
4150 	}
4151 	/* ..and wait at most one second for adapter to initialize */
4152 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
4153 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
4154 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
4155 			break;
4156 	}
4157 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
4158 		cmn_err(CE_WARN,
4159 		    "iwk_init(): timeout waiting for firmware init\n");
4160 		goto fail1;
4161 	}
4162 
4163 	/*
4164 	 * at this point, the firmware is loaded OK, then config the hardware
4165 	 * with the ucode API, including rxon, txpower, etc.
4166 	 */
4167 	err = iwk_config(sc);
4168 	if (err) {
4169 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
4170 		goto fail1;
4171 	}
4172 
4173 	/* at this point, hardware may receive beacons :) */
4174 	mutex_exit(&sc->sc_glock);
4175 	return (IWK_SUCCESS);
4176 
4177 fail1:
4178 	err = IWK_FAIL;
4179 	mutex_exit(&sc->sc_glock);
4180 	return (err);
4181 }
4182 
4183 static void
4184 iwk_stop(iwk_sc_t *sc)
4185 {
4186 	uint32_t tmp;
4187 	int i;
4188 
4189 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4190 		mutex_enter(&sc->sc_glock);
4191 
4192 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4193 	/* disable interrupts */
4194 	IWK_WRITE(sc, CSR_INT_MASK, 0);
4195 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4196 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4197 
4198 	/* reset all Tx rings */
4199 	for (i = 0; i < IWK_NUM_QUEUES; i++)
4200 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
4201 
4202 	/* reset Rx ring */
4203 	iwk_reset_rx_ring(sc);
4204 
4205 	iwk_mac_access_enter(sc);
4206 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4207 	iwk_mac_access_exit(sc);
4208 
4209 	DELAY(5);
4210 
4211 	iwk_stop_master(sc);
4212 
4213 	sc->sc_tx_timer = 0;
4214 	tmp = IWK_READ(sc, CSR_RESET);
4215 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4216 
4217 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4218 		mutex_exit(&sc->sc_glock);
4219 }
4220 
4221 /*
4222  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4223  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4224  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4225  * INRIA Sophia - Projet Planete
4226  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4227  */
4228 #define	is_success(amrr)	\
4229 	((amrr)->retrycnt < (amrr)->txcnt / 10)
4230 #define	is_failure(amrr)	\
4231 	((amrr)->retrycnt > (amrr)->txcnt / 3)
4232 #define	is_enough(amrr)		\
4233 	((amrr)->txcnt > 100)
4234 #define	is_min_rate(in)		\
4235 	((in)->in_txrate == 0)
4236 #define	is_max_rate(in)		\
4237 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
4238 #define	increase_rate(in)	\
4239 	((in)->in_txrate++)
4240 #define	decrease_rate(in)	\
4241 	((in)->in_txrate--)
4242 #define	reset_cnt(amrr)		\
4243 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
4244 
4245 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
4246 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
4247 
4248 static void
4249 iwk_amrr_init(iwk_amrr_t *amrr)
4250 {
4251 	amrr->success = 0;
4252 	amrr->recovery = 0;
4253 	amrr->txcnt = amrr->retrycnt = 0;
4254 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4255 }
4256 
4257 static void
4258 iwk_amrr_timeout(iwk_sc_t *sc)
4259 {
4260 	ieee80211com_t *ic = &sc->sc_ic;
4261 
4262 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
4263 	if (ic->ic_opmode == IEEE80211_M_STA)
4264 		iwk_amrr_ratectl(NULL, ic->ic_bss);
4265 	else
4266 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
4267 	sc->sc_clk = ddi_get_lbolt();
4268 }
4269 
4270 /* ARGSUSED */
4271 static void
4272 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
4273 {
4274 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
4275 	int need_change = 0;
4276 
4277 	if (is_success(amrr) && is_enough(amrr)) {
4278 		amrr->success++;
4279 		if (amrr->success >= amrr->success_threshold &&
4280 		    !is_max_rate(in)) {
4281 			amrr->recovery = 1;
4282 			amrr->success = 0;
4283 			increase_rate(in);
4284 			IWK_DBG((IWK_DEBUG_RATECTL,
4285 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4286 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4287 			need_change = 1;
4288 		} else {
4289 			amrr->recovery = 0;
4290 		}
4291 	} else if (is_failure(amrr)) {
4292 		amrr->success = 0;
4293 		if (!is_min_rate(in)) {
4294 			if (amrr->recovery) {
4295 				amrr->success_threshold++;
4296 				if (amrr->success_threshold >
4297 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4298 					amrr->success_threshold =
4299 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4300 			} else {
4301 				amrr->success_threshold =
4302 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4303 			}
4304 			decrease_rate(in);
4305 			IWK_DBG((IWK_DEBUG_RATECTL,
4306 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4307 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4308 			need_change = 1;
4309 		}
4310 		amrr->recovery = 0;	/* paper is incorrect */
4311 	}
4312 
4313 	if (is_enough(amrr) || need_change)
4314 		reset_cnt(amrr);
4315 }
4316 
4317 /*
4318  * calculate 4965 chipset's kelvin temperature according to
4319  * the data of init alive and satistics notification.
4320  * The details is described in iwk_calibration.h file
4321  */
4322 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4323 {
4324 	int32_t  tempera;
4325 	int32_t  r1, r2, r3;
4326 	uint32_t  r4_u;
4327 	int32_t   r4_s;
4328 
4329 	if (iwk_is_fat_channel(sc)) {
4330 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4331 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4332 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4333 		r4_u = sc->sc_card_alive_init.therm_r4[1];
4334 	} else {
4335 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4336 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4337 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4338 		r4_u = sc->sc_card_alive_init.therm_r4[0];
4339 	}
4340 
4341 	if (sc->sc_flags & IWK_F_STATISTICS) {
4342 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4343 		    (31-23)) >> (31-23);
4344 	} else {
4345 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4346 	}
4347 
4348 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4349 	    r1, r2, r3, r4_s));
4350 
4351 	if (r3 == r1) {
4352 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
4353 		    "failed to calculate temperature"
4354 		    "because r3 = r1\n");
4355 		return (DDI_FAILURE);
4356 	}
4357 
4358 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4359 	tempera /= (r3 - r1);
4360 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4361 
4362 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4363 	    tempera, KELVIN_TO_CELSIUS(tempera)));
4364 
4365 	return (tempera);
4366 }
4367 
4368 /* Determine whether 4965 is using 2.4 GHz band */
4369 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4370 {
4371 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4372 }
4373 
4374 /* Determine whether 4965 is using fat channel */
4375 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4376 {
4377 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4378 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4379 }
4380 
4381 /*
4382  * In MIMO mode, determine which group 4965's current channel belong to.
4383  * For more infomation about "channel group",
4384  * please refer to iwk_calibration.h file
4385  */
4386 static int iwk_txpower_grp(uint16_t channel)
4387 {
4388 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4389 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4390 		return (CALIB_CH_GROUP_5);
4391 	}
4392 
4393 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4394 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4395 		return (CALIB_CH_GROUP_1);
4396 	}
4397 
4398 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4399 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4400 		return (CALIB_CH_GROUP_2);
4401 	}
4402 
4403 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4404 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4405 		return (CALIB_CH_GROUP_3);
4406 	}
4407 
4408 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4409 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4410 		return (CALIB_CH_GROUP_4);
4411 	}
4412 
4413 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
4414 	    "can't find txpower group for channel %d.\n", channel);
4415 
4416 	return (DDI_FAILURE);
4417 }
4418 
4419 /* 2.4 GHz */
4420 static uint16_t iwk_eep_band_1[14] = {
4421 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4422 };
4423 
4424 /* 5.2 GHz bands */
4425 static uint16_t iwk_eep_band_2[13] = {
4426 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4427 };
4428 
4429 static uint16_t iwk_eep_band_3[12] = {
4430 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4431 };
4432 
4433 static uint16_t iwk_eep_band_4[11] = {
4434 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4435 };
4436 
4437 static uint16_t iwk_eep_band_5[6] = {
4438 	145, 149, 153, 157, 161, 165
4439 };
4440 
4441 static uint16_t iwk_eep_band_6[7] = {
4442 	1, 2, 3, 4, 5, 6, 7
4443 };
4444 
4445 static uint16_t iwk_eep_band_7[11] = {
4446 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4447 };
4448 
4449 /* Get regulatory data from eeprom for a given channel */
4450 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4451     uint16_t channel,
4452     int is_24G, int is_fat, int is_hi_chan)
4453 {
4454 	int32_t i;
4455 	uint16_t chan;
4456 
4457 	if (is_fat) {  /* 11n mode */
4458 
4459 		if (is_hi_chan) {
4460 			chan = channel - 4;
4461 		} else {
4462 			chan = channel;
4463 		}
4464 
4465 		for (i = 0; i < 7; i++) {
4466 			if (iwk_eep_band_6[i] == chan) {
4467 				return (&sc->sc_eep_map.band_24_channels[i]);
4468 			}
4469 		}
4470 		for (i = 0; i < 11; i++) {
4471 			if (iwk_eep_band_7[i] == chan) {
4472 				return (&sc->sc_eep_map.band_52_channels[i]);
4473 			}
4474 		}
4475 	} else if (is_24G) {  /* 2.4 GHz band */
4476 		for (i = 0; i < 14; i++) {
4477 			if (iwk_eep_band_1[i] == channel) {
4478 				return (&sc->sc_eep_map.band_1_channels[i]);
4479 			}
4480 		}
4481 	} else {  /* 5 GHz band */
4482 		for (i = 0; i < 13; i++) {
4483 			if (iwk_eep_band_2[i] == channel) {
4484 				return (&sc->sc_eep_map.band_2_channels[i]);
4485 			}
4486 		}
4487 		for (i = 0; i < 12; i++) {
4488 			if (iwk_eep_band_3[i] == channel) {
4489 				return (&sc->sc_eep_map.band_3_channels[i]);
4490 			}
4491 		}
4492 		for (i = 0; i < 11; i++) {
4493 			if (iwk_eep_band_4[i] == channel) {
4494 				return (&sc->sc_eep_map.band_4_channels[i]);
4495 			}
4496 		}
4497 		for (i = 0; i < 6; i++) {
4498 			if (iwk_eep_band_5[i] == channel) {
4499 				return (&sc->sc_eep_map.band_5_channels[i]);
4500 			}
4501 		}
4502 	}
4503 
4504 	return (NULL);
4505 }
4506 
4507 /*
4508  * Determine which subband a given channel belongs
4509  * to in 2.4 GHz or 5 GHz band
4510  */
4511 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4512 {
4513 	int32_t b_n = -1;
4514 
4515 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4516 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4517 			continue;
4518 		}
4519 
4520 		if ((channel >=
4521 		    (uint16_t)sc->sc_eep_map.calib_info.
4522 		    band_info_tbl[b_n].ch_from) &&
4523 		    (channel <=
4524 		    (uint16_t)sc->sc_eep_map.calib_info.
4525 		    band_info_tbl[b_n].ch_to)) {
4526 			break;
4527 		}
4528 	}
4529 
4530 	return (b_n);
4531 }
4532 
4533 /* Make a special division for interpolation operation */
4534 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4535 {
4536 	int32_t sign = 1;
4537 
4538 	if (num < 0) {
4539 		sign = -sign;
4540 		num = -num;
4541 	}
4542 
4543 	if (denom < 0) {
4544 		sign = -sign;
4545 		denom = -denom;
4546 	}
4547 
4548 	*res = ((num*2 + denom) / (denom*2)) * sign;
4549 
4550 	return (IWK_SUCCESS);
4551 }
4552 
4553 /* Make interpolation operation */
4554 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4555     int32_t x2, int32_t y2)
4556 {
4557 	int32_t val;
4558 
4559 	if (x2 == x1) {
4560 		return (y1);
4561 	} else {
4562 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4563 		return (val + y2);
4564 	}
4565 }
4566 
4567 /* Get interpolation measurement data of a given channel for all chains. */
4568 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4569     struct iwk_eep_calib_channel_info *chan_info)
4570 {
4571 	int32_t ban_n;
4572 	uint32_t ch1_n, ch2_n;
4573 	int32_t c, m;
4574 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4575 
4576 	/* determine subband number */
4577 	ban_n = iwk_band_number(sc, channel);
4578 	if (ban_n >= EEP_TX_POWER_BANDS) {
4579 		return (DDI_FAILURE);
4580 	}
4581 
4582 	ch1_n =
4583 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4584 	ch2_n =
4585 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4586 
4587 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4588 
4589 	/*
4590 	 * go through all chains on chipset
4591 	 */
4592 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4593 		/*
4594 		 * go through all factory measurements
4595 		 */
4596 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4597 			m1_p =
4598 			    &(sc->sc_eep_map.calib_info.
4599 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4600 			m2_p =
4601 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4602 			    ch2.measure[c][m]);
4603 			m_p = &(chan_info->measure[c][m]);
4604 
4605 			/*
4606 			 * make interpolation to get actual
4607 			 * Tx power for given channel
4608 			 */
4609 			m_p->actual_pow = iwk_interpolate_value(channel,
4610 			    ch1_n, m1_p->actual_pow,
4611 			    ch2_n, m2_p->actual_pow);
4612 
4613 			/* make interpolation to get index into gain table */
4614 			m_p->gain_idx = iwk_interpolate_value(channel,
4615 			    ch1_n, m1_p->gain_idx,
4616 			    ch2_n, m2_p->gain_idx);
4617 
4618 			/* make interpolation to get chipset temperature */
4619 			m_p->temperature = iwk_interpolate_value(channel,
4620 			    ch1_n, m1_p->temperature,
4621 			    ch2_n, m2_p->temperature);
4622 
4623 			/*
4624 			 * make interpolation to get power
4625 			 * amp detector level
4626 			 */
4627 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4628 			    m1_p->pa_det,
4629 			    ch2_n, m2_p->pa_det);
4630 		}
4631 	}
4632 
4633 	return (IWK_SUCCESS);
4634 }
4635 
4636 /*
4637  * Calculate voltage compensation for Tx power. For more infomation,
4638  * please refer to iwk_calibration.h file
4639  */
4640 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4641     int32_t curr_voltage)
4642 {
4643 	int32_t vol_comp = 0;
4644 
4645 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4646 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4647 		return (vol_comp);
4648 	}
4649 
4650 	(void) iwk_division(curr_voltage-eep_voltage,
4651 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4652 
4653 	if (curr_voltage > eep_voltage) {
4654 		vol_comp *= 2;
4655 	}
4656 	if ((vol_comp < -2) || (vol_comp > 2)) {
4657 		vol_comp = 0;
4658 	}
4659 
4660 	return (vol_comp);
4661 }
4662 
4663 /*
4664  * Thermal compensation values for txpower for various frequency ranges ...
4665  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4666  */
4667 static struct iwk_txpower_tempera_comp {
4668 	int32_t degrees_per_05db_a;
4669 	int32_t degrees_per_05db_a_denom;
4670 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4671 	{9, 2},			/* group 0 5.2, ch  34-43 */
4672 	{4, 1},			/* group 1 5.2, ch  44-70 */
4673 	{4, 1},			/* group 2 5.2, ch  71-124 */
4674 	{4, 1},			/* group 3 5.2, ch 125-200 */
4675 	{3, 1}			/* group 4 2.4, ch   all */
4676 };
4677 
4678 /*
4679  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4680  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4681  */
4682 static int32_t back_off_table[] = {
4683 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4684 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4685 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4686 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4687 	10			/* CCK */
4688 };
4689 
4690 /* determine minimum Tx power index in gain table */
4691 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4692 {
4693 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4694 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4695 	}
4696 
4697 	return (MIN_TX_GAIN_INDEX);
4698 }
4699 
4700 /*
4701  * Determine DSP and radio gain according to temperature and other factors.
4702  * This function is the majority of Tx power calibration
4703  */
4704 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4705     struct iwk_tx_power_db *tp_db)
4706 {
4707 	int is_24G, is_fat, is_high_chan, is_mimo;
4708 	int c, r;
4709 	int32_t target_power;
4710 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4711 	uint16_t channel;
4712 	uint8_t saturation_power;
4713 	int32_t regu_power;
4714 	int32_t curr_regu_power;
4715 	struct iwk_eep_channel *eep_chan_p;
4716 	struct iwk_eep_calib_channel_info eep_chan_calib;
4717 	int32_t eep_voltage, init_voltage;
4718 	int32_t voltage_compensation;
4719 	int32_t temperature;
4720 	int32_t degrees_per_05db_num;
4721 	int32_t degrees_per_05db_denom;
4722 	struct iwk_eep_calib_measure *measure_p;
4723 	int32_t interpo_temp;
4724 	int32_t power_limit;
4725 	int32_t atten_value;
4726 	int32_t tempera_comp[2];
4727 	int32_t interpo_gain_idx[2];
4728 	int32_t interpo_actual_pow[2];
4729 	union iwk_tx_power_dual_stream txpower_gains;
4730 	int32_t txpower_gains_idx;
4731 
4732 	channel = sc->sc_config.chan;
4733 
4734 	/* 2.4 GHz or 5 GHz band */
4735 	is_24G = iwk_is_24G_band(sc);
4736 
4737 	/* fat channel or not */
4738 	is_fat = iwk_is_fat_channel(sc);
4739 
4740 	/*
4741 	 * using low half channel number or high half channel number
4742 	 * identify fat channel
4743 	 */
4744 	if (is_fat && (sc->sc_config.flags &
4745 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4746 		is_high_chan = 1;
4747 	}
4748 
4749 	if ((channel > 0) && (channel < 200)) {
4750 		/* get regulatory channel data from eeprom */
4751 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4752 		    is_fat, is_high_chan);
4753 		if (NULL == eep_chan_p) {
4754 			cmn_err(CE_WARN,
4755 			    "iwk_txpower_table_cmd_init(): "
4756 			    "can't get channel infomation\n");
4757 			return (DDI_FAILURE);
4758 		}
4759 	} else {
4760 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4761 		    "channel(%d) isn't in proper range\n",
4762 		    channel);
4763 		return (DDI_FAILURE);
4764 	}
4765 
4766 	/* initial value of Tx power */
4767 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4768 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4769 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4770 		    "user TX power is too weak\n");
4771 		return (DDI_FAILURE);
4772 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4773 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4774 		    "user TX power is too strong\n");
4775 		return (DDI_FAILURE);
4776 	}
4777 
4778 	target_power = 2 * sc->sc_user_txpower;
4779 
4780 	/* determine which group current channel belongs to */
4781 	tx_grp = iwk_txpower_grp(channel);
4782 	if (tx_grp < 0) {
4783 		return (tx_grp);
4784 	}
4785 
4786 
4787 	if (is_fat) {
4788 		if (is_high_chan) {
4789 			channel -= 2;
4790 		} else {
4791 			channel += 2;
4792 		}
4793 	}
4794 
4795 	/* determine saturation power */
4796 	if (is_24G) {
4797 		saturation_power =
4798 		    sc->sc_eep_map.calib_info.saturation_power24;
4799 	} else {
4800 		saturation_power =
4801 		    sc->sc_eep_map.calib_info.saturation_power52;
4802 	}
4803 
4804 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4805 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4806 		if (is_24G) {
4807 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4808 		} else {
4809 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4810 		}
4811 	}
4812 
4813 	/* determine regulatory power */
4814 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4815 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4816 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4817 		if (is_24G) {
4818 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4819 		} else {
4820 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4821 		}
4822 	}
4823 
4824 	/*
4825 	 * get measurement data for current channel
4826 	 * suach as temperature,index to gain table,actual Tx power
4827 	 */
4828 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4829 
4830 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4831 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4832 
4833 	/* calculate voltage compensation to Tx power */
4834 	voltage_compensation =
4835 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4836 
4837 	if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4838 		temperature = sc->sc_tempera;
4839 	} else {
4840 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4841 	}
4842 	if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4843 		temperature = sc->sc_tempera;
4844 	} else {
4845 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4846 	}
4847 	temperature = KELVIN_TO_CELSIUS(temperature);
4848 
4849 	degrees_per_05db_num =
4850 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4851 	degrees_per_05db_denom =
4852 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4853 
4854 	for (c = 0; c < 2; c++) {  /* go through all chains */
4855 		measure_p = &eep_chan_calib.measure[c][1];
4856 		interpo_temp = measure_p->temperature;
4857 
4858 		/* determine temperature compensation to Tx power */
4859 		(void) iwk_division(
4860 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4861 		    degrees_per_05db_num, &tempera_comp[c]);
4862 
4863 		interpo_gain_idx[c] = measure_p->gain_idx;
4864 		interpo_actual_pow[c] = measure_p->actual_pow;
4865 	}
4866 
4867 	/*
4868 	 * go through all rate entries in Tx power table
4869 	 */
4870 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4871 		if (r & 0x8) {
4872 			/* need to lower regulatory power for MIMO mode */
4873 			curr_regu_power = regu_power -
4874 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4875 			is_mimo = 1;
4876 		} else {
4877 			curr_regu_power = regu_power;
4878 			is_mimo = 0;
4879 		}
4880 
4881 		power_limit = saturation_power - back_off_table[r];
4882 		if (power_limit > curr_regu_power) {
4883 			/* final Tx power limit */
4884 			power_limit = curr_regu_power;
4885 		}
4886 
4887 		if (target_power > power_limit) {
4888 			target_power = power_limit; /* final target Tx power */
4889 		}
4890 
4891 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4892 			if (is_mimo) {
4893 				atten_value =
4894 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4895 			} else {
4896 				atten_value = 0;
4897 			}
4898 
4899 			/*
4900 			 * calculate index in gain table
4901 			 * this step is very important
4902 			 */
4903 			txpower_gains_idx = interpo_gain_idx[c] -
4904 			    (target_power - interpo_actual_pow[c]) -
4905 			    tempera_comp[c] - voltage_compensation +
4906 			    atten_value;
4907 
4908 			if (txpower_gains_idx <
4909 			    iwk_min_power_index(r, is_24G)) {
4910 				txpower_gains_idx =
4911 				    iwk_min_power_index(r, is_24G);
4912 			}
4913 
4914 			if (!is_24G) {
4915 				/*
4916 				 * support negative index for 5 GHz
4917 				 * band
4918 				 */
4919 				txpower_gains_idx += 9;
4920 			}
4921 
4922 			if (POWER_TABLE_CCK_ENTRY == r) {
4923 				/* for CCK mode, make necessary attenuaton */
4924 				txpower_gains_idx +=
4925 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4926 			}
4927 
4928 			if (txpower_gains_idx > 107) {
4929 				txpower_gains_idx = 107;
4930 			} else if (txpower_gains_idx < 0) {
4931 				txpower_gains_idx = 0;
4932 			}
4933 
4934 			/* search DSP and radio gains in gain table */
4935 			txpower_gains.s.radio_tx_gain[c] =
4936 			    gains_table[is_24G][txpower_gains_idx].radio;
4937 			txpower_gains.s.dsp_predis_atten[c] =
4938 			    gains_table[is_24G][txpower_gains_idx].dsp;
4939 
4940 			IWK_DBG((IWK_DEBUG_CALIBRATION,
4941 			    "rate_index: %d, "
4942 			    "gain_index %d, c: %d,is_mimo: %d\n",
4943 			    r, txpower_gains_idx, c, is_mimo));
4944 		}
4945 
4946 		/* initialize Tx power table */
4947 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4948 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4949 		} else {
4950 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
4951 		}
4952 	}
4953 
4954 	return (IWK_SUCCESS);
4955 }
4956 
4957 /*
4958  * make Tx power calibration to adjust Tx power.
4959  * This is completed by sending out Tx power table command.
4960  */
4961 static int iwk_tx_power_calibration(iwk_sc_t *sc)
4962 {
4963 	iwk_tx_power_table_cmd_t cmd;
4964 	int rv;
4965 
4966 	if (sc->sc_flags & IWK_F_SCANNING) {
4967 		return (IWK_SUCCESS);
4968 	}
4969 
4970 	/* necessary initialization to Tx power table command */
4971 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
4972 	cmd.channel = sc->sc_config.chan;
4973 	cmd.channel_normal_width = 0;
4974 
4975 	/* initialize Tx power table */
4976 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
4977 	if (rv) {
4978 		cmn_err(CE_NOTE, "rv= %d\n", rv);
4979 		return (rv);
4980 	}
4981 
4982 	/* send out Tx power table command */
4983 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
4984 	if (rv) {
4985 		return (rv);
4986 	}
4987 
4988 	/* record current temperature */
4989 	sc->sc_last_tempera = sc->sc_tempera;
4990 
4991 	return (IWK_SUCCESS);
4992 }
4993 
4994 /* This function is the handler of statistics notification from uCode */
4995 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
4996 {
4997 	int is_diff;
4998 	struct iwk_notif_statistics *statistics_p =
4999 	    (struct iwk_notif_statistics *)(desc + 1);
5000 
5001 	mutex_enter(&sc->sc_glock);
5002 
5003 	is_diff = (sc->sc_statistics.general.temperature !=
5004 	    statistics_p->general.temperature) ||
5005 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
5006 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
5007 
5008 	/* update statistics data */
5009 	(void) memcpy(&sc->sc_statistics, statistics_p,
5010 	    sizeof (struct iwk_notif_statistics));
5011 
5012 	sc->sc_flags |= IWK_F_STATISTICS;
5013 
5014 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
5015 		/* make Receiver gain balance calibration */
5016 		(void) iwk_rxgain_diff(sc);
5017 
5018 		/* make Receiver sensitivity calibration */
5019 		(void) iwk_rx_sens(sc);
5020 	}
5021 
5022 
5023 	if (!is_diff) {
5024 		mutex_exit(&sc->sc_glock);
5025 		return;
5026 	}
5027 
5028 	/* calibration current temperature of 4965 chipset */
5029 	sc->sc_tempera = iwk_curr_tempera(sc);
5030 
5031 	/* distinct temperature change will trigger Tx power calibration */
5032 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
5033 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
5034 		/* make Tx power calibration */
5035 		(void) iwk_tx_power_calibration(sc);
5036 	}
5037 
5038 	mutex_exit(&sc->sc_glock);
5039 }
5040 
5041 /* Determine this station is in associated state or not */
5042 static int iwk_is_associated(iwk_sc_t *sc)
5043 {
5044 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
5045 }
5046 
5047 /* Make necessary preparation for Receiver gain balance calibration */
5048 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
5049 {
5050 	int i, rv;
5051 	struct iwk_calibration_cmd cmd;
5052 	struct iwk_rx_gain_diff *gain_diff_p;
5053 
5054 	gain_diff_p = &sc->sc_rxgain_diff;
5055 
5056 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
5057 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
5058 
5059 	for (i = 0; i < RX_CHAINS_NUM; i++) {
5060 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
5061 	}
5062 
5063 	if (iwk_is_associated(sc)) {
5064 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5065 		cmd.diff_gain_a = 0;
5066 		cmd.diff_gain_b = 0;
5067 		cmd.diff_gain_c = 0;
5068 
5069 		/* assume the gains of every Rx chains is balanceable */
5070 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
5071 		    sizeof (cmd), 1);
5072 		if (rv) {
5073 			return (rv);
5074 		}
5075 
5076 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
5077 	}
5078 
5079 	return (IWK_SUCCESS);
5080 }
5081 
5082 /*
5083  * make Receiver gain balance to balance Rx gain between Rx chains
5084  * and determine which chain is disconnected
5085  */
5086 static int iwk_rxgain_diff(iwk_sc_t *sc)
5087 {
5088 	int i, is_24G, rv;
5089 	int max_beacon_chain_n;
5090 	int min_noise_chain_n;
5091 	uint16_t channel_n;
5092 	int32_t beacon_diff;
5093 	int32_t noise_diff;
5094 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
5095 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
5096 	struct iwk_calibration_cmd cmd;
5097 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5098 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5099 	struct statistics_rx_non_phy *rx_general_p =
5100 	    &sc->sc_statistics.rx.general;
5101 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
5102 
5103 	if (INTERFERENCE_DATA_AVAILABLE !=
5104 	    rx_general_p->interference_data_flag) {
5105 		return (IWK_SUCCESS);
5106 	}
5107 
5108 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
5109 		return (IWK_SUCCESS);
5110 	}
5111 
5112 	is_24G = iwk_is_24G_band(sc);
5113 	channel_n = sc->sc_config.chan;	 /* channel number */
5114 
5115 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
5116 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
5117 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
5118 	    !is_24G)) {
5119 		return (IWK_SUCCESS);
5120 	}
5121 
5122 	/* Rx chain's noise strength from statistics notification */
5123 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
5124 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
5125 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
5126 
5127 	/* Rx chain's beacon strength from statistics notification */
5128 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
5129 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
5130 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
5131 
5132 	gain_diff_p->beacon_count++;
5133 
5134 	/* accumulate chain's noise strength */
5135 	gain_diff_p->noise_stren_a += noise_chain_a;
5136 	gain_diff_p->noise_stren_b += noise_chain_b;
5137 	gain_diff_p->noise_stren_c += noise_chain_c;
5138 
5139 	/* accumulate chain's beacon strength */
5140 	gain_diff_p->beacon_stren_a += beacon_chain_a;
5141 	gain_diff_p->beacon_stren_b += beacon_chain_b;
5142 	gain_diff_p->beacon_stren_c += beacon_chain_c;
5143 
5144 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
5145 		/* calculate average beacon strength */
5146 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
5147 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
5148 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
5149 
5150 		/* calculate average noise strength */
5151 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
5152 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5153 		noise_aver[2] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5154 
5155 		/* determine maximum beacon strength among 3 chains */
5156 		if ((beacon_aver[0] >= beacon_aver[1]) &&
5157 		    (beacon_aver[0] >= beacon_aver[2])) {
5158 			max_beacon_chain_n = 0;
5159 			gain_diff_p->connected_chains = 1 << 0;
5160 		} else if (beacon_aver[1] >= beacon_aver[2]) {
5161 			max_beacon_chain_n = 1;
5162 			gain_diff_p->connected_chains = 1 << 1;
5163 		} else {
5164 			max_beacon_chain_n = 2;
5165 			gain_diff_p->connected_chains = 1 << 2;
5166 		}
5167 
5168 		/* determine which chain is disconnected */
5169 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5170 			if (i != max_beacon_chain_n) {
5171 				beacon_diff = beacon_aver[max_beacon_chain_n] -
5172 				    beacon_aver[i];
5173 				if (beacon_diff > MAX_ALLOWED_DIFF) {
5174 					gain_diff_p->disconnect_chain[i] = 1;
5175 				} else {
5176 					gain_diff_p->connected_chains |=
5177 					    (1 << i);
5178 				}
5179 			}
5180 		}
5181 
5182 		/*
5183 		 * if chain A and B are both disconnected,
5184 		 * assume the stronger in beacon strength is connected
5185 		 */
5186 		if (gain_diff_p->disconnect_chain[0] &&
5187 		    gain_diff_p->disconnect_chain[1]) {
5188 			if (beacon_aver[0] >= beacon_aver[1]) {
5189 				gain_diff_p->disconnect_chain[0] = 0;
5190 				gain_diff_p->connected_chains |= (1 << 0);
5191 			} else {
5192 				gain_diff_p->disconnect_chain[1] = 0;
5193 				gain_diff_p->connected_chains |= (1 << 1);
5194 			}
5195 		}
5196 
5197 		/* determine minimum noise strength among 3 chains */
5198 		if (!gain_diff_p->disconnect_chain[0]) {
5199 			min_noise_chain_n = 0;
5200 
5201 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5202 				if (!gain_diff_p->disconnect_chain[i] &&
5203 				    (noise_aver[i] <=
5204 				    noise_aver[min_noise_chain_n])) {
5205 					min_noise_chain_n = i;
5206 				}
5207 
5208 			}
5209 		} else {
5210 			min_noise_chain_n = 1;
5211 
5212 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5213 				if (!gain_diff_p->disconnect_chain[i] &&
5214 				    (noise_aver[i] <=
5215 				    noise_aver[min_noise_chain_n])) {
5216 					min_noise_chain_n = i;
5217 				}
5218 			}
5219 		}
5220 
5221 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
5222 
5223 		/* determine gain difference between chains */
5224 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5225 			if (!gain_diff_p->disconnect_chain[i] &&
5226 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
5227 			    gain_diff_p->gain_diff_chain[i])) {
5228 
5229 				noise_diff = noise_aver[i] -
5230 				    noise_aver[min_noise_chain_n];
5231 				gain_diff_p->gain_diff_chain[i] =
5232 				    (uint8_t)((noise_diff * 10) / 15);
5233 
5234 				if (gain_diff_p->gain_diff_chain[i] > 3) {
5235 					gain_diff_p->gain_diff_chain[i] = 3;
5236 				}
5237 
5238 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
5239 			} else {
5240 				gain_diff_p->gain_diff_chain[i] = 0;
5241 			}
5242 		}
5243 
5244 		if (!gain_diff_p->gain_diff_send) {
5245 			gain_diff_p->gain_diff_send = 1;
5246 
5247 			(void) memset(&cmd, 0, sizeof (cmd));
5248 
5249 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5250 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
5251 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
5252 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
5253 
5254 			/*
5255 			 * send out PHY calibration command to
5256 			 * adjust every chain's Rx gain
5257 			 */
5258 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5259 			    &cmd, sizeof (cmd), 1);
5260 			if (rv) {
5261 				return (rv);
5262 			}
5263 
5264 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
5265 		}
5266 
5267 		gain_diff_p->beacon_stren_a = 0;
5268 		gain_diff_p->beacon_stren_b = 0;
5269 		gain_diff_p->beacon_stren_c = 0;
5270 
5271 		gain_diff_p->noise_stren_a = 0;
5272 		gain_diff_p->noise_stren_b = 0;
5273 		gain_diff_p->noise_stren_c = 0;
5274 	}
5275 
5276 	return (IWK_SUCCESS);
5277 }
5278 
5279 /* Make necessary preparation for Receiver sensitivity calibration */
5280 static int iwk_rx_sens_init(iwk_sc_t *sc)
5281 {
5282 	int i, rv;
5283 	struct iwk_rx_sensitivity_cmd cmd;
5284 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5285 
5286 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5287 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5288 
5289 	rx_sens_p->auto_corr_ofdm_x4 = 90;
5290 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5291 	rx_sens_p->auto_corr_ofdm_x1 = 105;
5292 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5293 
5294 	rx_sens_p->auto_corr_cck_x4 = 125;
5295 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5296 	rx_sens_p->min_energy_det_cck = 100;
5297 
5298 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5299 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5300 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5301 
5302 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5303 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5304 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
5305 	rx_sens_p->last_false_alarm_cnt_cck = 0;
5306 
5307 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5308 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5309 	rx_sens_p->cck_no_false_alarm_num = 0;
5310 	rx_sens_p->cck_beacon_idx = 0;
5311 
5312 	for (i = 0; i < 10; i++) {
5313 		rx_sens_p->cck_beacon_min[i] = 0;
5314 	}
5315 
5316 	rx_sens_p->cck_noise_idx = 0;
5317 	rx_sens_p->cck_noise_ref = 0;
5318 
5319 	for (i = 0; i < 20; i++) {
5320 		rx_sens_p->cck_noise_max[i] = 0;
5321 	}
5322 
5323 	rx_sens_p->cck_noise_diff = 0;
5324 	rx_sens_p->cck_no_false_alarm_num = 0;
5325 
5326 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5327 
5328 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5329 	    rx_sens_p->auto_corr_ofdm_x4;
5330 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5331 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5332 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5333 	    rx_sens_p->auto_corr_ofdm_x1;
5334 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5335 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5336 
5337 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5338 	    rx_sens_p->auto_corr_cck_x4;
5339 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5340 	    rx_sens_p->auto_corr_mrc_cck_x4;
5341 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
5342 
5343 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5344 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5345 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5346 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5347 
5348 	/* at first, set up Rx to maximum sensitivity */
5349 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5350 	if (rv) {
5351 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5352 		    "in the process of initialization, "
5353 		    "failed to send rx sensitivity command\n");
5354 		return (rv);
5355 	}
5356 
5357 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5358 
5359 	return (IWK_SUCCESS);
5360 }
5361 
5362 /*
5363  * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity.
5364  * for more infomation, please refer to iwk_calibration.h file
5365  */
5366 static int iwk_rx_sens(iwk_sc_t *sc)
5367 {
5368 	int rv;
5369 	uint32_t actual_rx_time;
5370 	struct statistics_rx_non_phy *rx_general_p =
5371 	    &sc->sc_statistics.rx.general;
5372 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5373 	struct iwk_rx_sensitivity_cmd cmd;
5374 
5375 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5376 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5377 		    "sensitivity initialization has not finished.\n");
5378 		return (DDI_FAILURE);
5379 	}
5380 
5381 	if (INTERFERENCE_DATA_AVAILABLE !=
5382 	    rx_general_p->interference_data_flag) {
5383 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5384 		    "can't make rx sensitivity calibration,"
5385 		    "because of invalid statistics\n");
5386 		return (DDI_FAILURE);
5387 	}
5388 
5389 	actual_rx_time = rx_general_p->channel_load;
5390 	if (!actual_rx_time) {
5391 		IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5392 		    "can't make rx sensitivity calibration,"
5393 		    "because has not enough rx time\n"));
5394 		return (DDI_FAILURE);
5395 	}
5396 
5397 	/* make Rx sensitivity calibration for OFDM mode */
5398 	rv = iwk_ofdm_sens(sc, actual_rx_time);
5399 	if (rv) {
5400 		return (rv);
5401 	}
5402 
5403 	/* make Rx sensitivity calibration for CCK mode */
5404 	rv = iwk_cck_sens(sc, actual_rx_time);
5405 	if (rv) {
5406 		return (rv);
5407 	}
5408 
5409 	/*
5410 	 * if the sum of false alarm had not changed, nothing will be done
5411 	 */
5412 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5413 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5414 		return (IWK_SUCCESS);
5415 	}
5416 
5417 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5418 
5419 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5420 	    rx_sens_p->auto_corr_ofdm_x4;
5421 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5422 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5423 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5424 	    rx_sens_p->auto_corr_ofdm_x1;
5425 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5426 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5427 
5428 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5429 	    rx_sens_p->auto_corr_cck_x4;
5430 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5431 	    rx_sens_p->auto_corr_mrc_cck_x4;
5432 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5433 	    rx_sens_p->min_energy_det_cck;
5434 
5435 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5436 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5437 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5438 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5439 
5440 	/*
5441 	 * send sensitivity command to complete actual sensitivity calibration
5442 	 */
5443 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5444 	if (rv) {
5445 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5446 		    "fail to send rx sensitivity command\n");
5447 		return (rv);
5448 	}
5449 
5450 	return (IWK_SUCCESS);
5451 
5452 }
5453 
5454 /*
5455  * make Rx sensitivity calibration for CCK mode.
5456  * This is preparing parameters for Sensitivity command
5457  */
5458 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5459 {
5460 	int i;
5461 	uint8_t noise_a, noise_b, noise_c;
5462 	uint8_t max_noise_abc, max_noise_20;
5463 	uint32_t beacon_a, beacon_b, beacon_c;
5464 	uint32_t min_beacon_abc, max_beacon_10;
5465 	uint32_t cck_fa, cck_bp;
5466 	uint32_t cck_sum_fa_bp;
5467 	uint32_t temp;
5468 	struct statistics_rx_non_phy *rx_general_p =
5469 	    &sc->sc_statistics.rx.general;
5470 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5471 
5472 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5473 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5474 
5475 	/* accumulate false alarm */
5476 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5477 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5478 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5479 		cck_fa += (0xFFFFFFFF - temp);
5480 	} else {
5481 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5482 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5483 	}
5484 
5485 	/* accumulate bad plcp */
5486 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5487 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5488 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5489 		cck_bp += (0xFFFFFFFF - temp);
5490 	} else {
5491 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5492 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5493 	}
5494 
5495 	/*
5496 	 * calculate relative value
5497 	 */
5498 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5499 	rx_sens_p->cck_noise_diff = 0;
5500 
5501 	noise_a =
5502 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5503 	noise_b =
5504 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5505 	noise_c =
5506 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5507 
5508 	beacon_a = rx_general_p->beacon_energy_a;
5509 	beacon_b = rx_general_p->beacon_energy_b;
5510 	beacon_c = rx_general_p->beacon_energy_c;
5511 
5512 	/* determine maximum noise among 3 chains */
5513 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5514 		max_noise_abc = noise_a;
5515 	} else if (noise_b >= noise_c) {
5516 		max_noise_abc = noise_b;
5517 	} else {
5518 		max_noise_abc = noise_c;
5519 	}
5520 
5521 	/* record maximum noise among 3 chains */
5522 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5523 	rx_sens_p->cck_noise_idx++;
5524 	if (rx_sens_p->cck_noise_idx >= 20) {
5525 		rx_sens_p->cck_noise_idx = 0;
5526 	}
5527 
5528 	/* determine maximum noise among 20 max noise */
5529 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5530 	for (i = 0; i < 20; i++) {
5531 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5532 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5533 		}
5534 	}
5535 
5536 	/* determine minimum beacon among 3 chains */
5537 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5538 		min_beacon_abc = beacon_a;
5539 	} else if (beacon_b <= beacon_c) {
5540 		min_beacon_abc = beacon_b;
5541 	} else {
5542 		min_beacon_abc = beacon_c;
5543 	}
5544 
5545 	/* record miminum beacon among 3 chains */
5546 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5547 	rx_sens_p->cck_beacon_idx++;
5548 	if (rx_sens_p->cck_beacon_idx >= 10) {
5549 		rx_sens_p->cck_beacon_idx = 0;
5550 	}
5551 
5552 	/* determine maximum beacon among 10 miminum beacon among 3 chains */
5553 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5554 	for (i = 0; i < 10; i++) {
5555 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5556 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5557 		}
5558 	}
5559 
5560 	/* add a little margin */
5561 	max_beacon_10 += 6;
5562 
5563 	/* record the count of having no false alarms */
5564 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5565 		rx_sens_p->cck_no_false_alarm_num++;
5566 	} else {
5567 		rx_sens_p->cck_no_false_alarm_num = 0;
5568 	}
5569 
5570 	/*
5571 	 * adjust parameters in sensitivity command
5572 	 * according to different status.
5573 	 * for more infomation, please refer to iwk_calibration.h file
5574 	 */
5575 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5576 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5577 
5578 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5579 			rx_sens_p->cck_noise_ref = max_noise_20;
5580 
5581 			if (rx_sens_p->min_energy_det_cck > 2) {
5582 				rx_sens_p->min_energy_det_cck -= 2;
5583 			}
5584 		}
5585 
5586 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5587 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5588 		} else {
5589 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5590 				rx_sens_p->auto_corr_cck_x4 += 3;
5591 			} else {
5592 				rx_sens_p->auto_corr_cck_x4 = 200;
5593 			}
5594 		}
5595 
5596 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5597 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5598 		} else {
5599 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5600 		}
5601 
5602 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5603 
5604 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5605 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5606 
5607 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5608 		    (int32_t)max_noise_20;
5609 
5610 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5611 		    ((rx_sens_p->cck_noise_diff > 2) ||
5612 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5613 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5614 				rx_sens_p->min_energy_det_cck += 2;
5615 			} else {
5616 				rx_sens_p->min_energy_det_cck = 97;
5617 			}
5618 
5619 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5620 				rx_sens_p->auto_corr_cck_x4 -= 3;
5621 			} else {
5622 				rx_sens_p->auto_corr_cck_x4 = 125;
5623 			}
5624 
5625 			if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) {
5626 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5627 			} else {
5628 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5629 			}
5630 
5631 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5632 		} else {
5633 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5634 		}
5635 	} else {
5636 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5637 
5638 		rx_sens_p->cck_noise_ref = max_noise_20;
5639 
5640 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5641 			rx_sens_p->min_energy_det_cck -= 8;
5642 		}
5643 
5644 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5645 	}
5646 
5647 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5648 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5649 	}
5650 
5651 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5652 
5653 	return (IWK_SUCCESS);
5654 }
5655 
5656 /*
5657  * make Rx sensitivity calibration for OFDM mode.
5658  * This is preparing parameters for Sensitivity command
5659  */
5660 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5661 {
5662 	uint32_t temp;
5663 	uint16_t temp1;
5664 	uint32_t ofdm_fa, ofdm_bp;
5665 	uint32_t ofdm_sum_fa_bp;
5666 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5667 
5668 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5669 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5670 
5671 	/* accumulate false alarm */
5672 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5673 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5674 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5675 		ofdm_fa += (0xFFFFFFFF - temp);
5676 	} else {
5677 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5678 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5679 	}
5680 
5681 	/* accumulate bad plcp */
5682 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5683 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5684 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5685 		ofdm_bp += (0xFFFFFFFF - temp);
5686 	} else {
5687 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5688 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5689 	}
5690 
5691 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5692 
5693 	/*
5694 	 * adjust parameter in sensitivity command according to different status
5695 	 */
5696 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5697 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5698 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5699 
5700 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5701 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5702 		    (temp1 <= 210) ? temp1 : 210;
5703 
5704 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5705 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5706 
5707 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5708 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5709 		    (temp1 <= 270) ? temp1 : 270;
5710 
5711 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5712 
5713 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5714 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5715 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5716 
5717 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5718 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5719 		    (temp1 >= 170) ? temp1 : 170;
5720 
5721 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5722 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5723 
5724 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5725 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5726 		    (temp1 >= 220) ? temp1 : 220;
5727 
5728 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5729 
5730 	} else {
5731 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5732 	}
5733 
5734 	return (IWK_SUCCESS);
5735 }
5736 
5737 /*
5738  * additional process to management frames
5739  */
5740 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
5741     struct ieee80211_node *in,
5742     int subtype, int rssi, uint32_t rstamp)
5743 {
5744 	iwk_sc_t *sc = (iwk_sc_t *)ic;
5745 	struct ieee80211_frame *wh;
5746 	uint8_t index1, index2;
5747 	int err;
5748 
5749 	sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp);
5750 
5751 	mutex_enter(&sc->sc_glock);
5752 	switch (subtype) {
5753 	case IEEE80211_FC0_SUBTYPE_BEACON:
5754 		if (sc->sc_ibss.ibss_beacon.syncbeacon && in == ic->ic_bss &&
5755 		    ic->ic_state == IEEE80211_S_RUN) {
5756 			if (ieee80211_beacon_update(ic, in,
5757 			    &sc->sc_ibss.ibss_beacon.iwk_boff,
5758 			    sc->sc_ibss.ibss_beacon.mp, 0)) {
5759 				bcopy(sc->sc_ibss.ibss_beacon.mp->b_rptr,
5760 				    sc->sc_ibss.ibss_beacon.beacon_cmd.
5761 				    bcon_frame,
5762 				    MBLKL(sc->sc_ibss.ibss_beacon.mp));
5763 			}
5764 			err = iwk_cmd(sc, REPLY_TX_BEACON,
5765 			    &sc->sc_ibss.ibss_beacon.beacon_cmd,
5766 			    sc->sc_ibss.ibss_beacon.beacon_cmd_len, 1);
5767 			if (err != IWK_SUCCESS) {
5768 				cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5769 				    "failed to TX beacon.\n");
5770 			}
5771 			sc->sc_ibss.ibss_beacon.syncbeacon = 0;
5772 		}
5773 		if (ic->ic_opmode == IEEE80211_M_IBSS &&
5774 		    ic->ic_state == IEEE80211_S_RUN) {
5775 			wh = (struct ieee80211_frame *)mp->b_rptr;
5776 			mutex_enter(&sc->sc_ibss.node_tb_lock);
5777 			/*
5778 			 * search for node in ibss node table
5779 			 */
5780 			for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
5781 			    index1++) {
5782 				if (sc->sc_ibss.ibss_node_tb[index1].used &&
5783 				    IEEE80211_ADDR_EQ(sc->sc_ibss.
5784 				    ibss_node_tb[index1].node.bssid,
5785 				    wh->i_addr2)) {
5786 					break;
5787 				}
5788 			}
5789 			/*
5790 			 * if don't find in ibss node table
5791 			 */
5792 			if (index1 >= IWK_BROADCAST_ID) {
5793 				err = iwk_clean_add_node_ibss(ic,
5794 				    wh->i_addr2, &index2);
5795 				if (err != IWK_SUCCESS) {
5796 					cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5797 					    "failed to clean all nodes "
5798 					    "and add one node\n");
5799 				}
5800 			}
5801 			mutex_exit(&sc->sc_ibss.node_tb_lock);
5802 		}
5803 		break;
5804 	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
5805 		break;
5806 	}
5807 	mutex_exit(&sc->sc_glock);
5808 }
5809 
5810 /*
5811  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5812  *     a 256-entry history of uCode execution within a circular buffer.
5813  *     Its header format is:
5814  *
5815  *	uint32_t log_size;	log capacity (in number of entries)
5816  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5817  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5818  *      uint32_t write_index;	next circular buffer entry that uCode would fill
5819  *
5820  *     The header is followed by the circular buffer of log entries.  Entries
5821  *     with timestamps have the following format:
5822  *
5823  *	uint32_t event_id;     range 0 - 1500
5824  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5825  *	uint32_t data;         event_id-specific data value
5826  *
5827  *     Entries without timestamps contain only event_id and data.
5828  */
5829 
5830 /*
5831  * iwk_write_event_log - Write event log to dmesg
5832  */
5833 static void iwk_write_event_log(iwk_sc_t *sc)
5834 {
5835 	uint32_t log_event_table_ptr;	/* Start address of event table */
5836 	uint32_t startptr;	/* Start address of log data */
5837 	uint32_t logptr;	/* address of log data entry */
5838 	uint32_t i, n, num_events;
5839 	uint32_t event_id, data1, data2; /* log data */
5840 
5841 	uint32_t log_size;   /* log capacity (in number of entries) */
5842 	uint32_t type;	/* (1)timestamp with each entry,(0) no timestamp */
5843 	uint32_t wraps;	/* # times uCode has wrapped to */
5844 			/* the top of circular buffer */
5845 	uint32_t idx; /* index of entry to be filled in next */
5846 
5847 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5848 	if (!(log_event_table_ptr)) {
5849 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5850 		return;
5851 	}
5852 
5853 	iwk_mac_access_enter(sc);
5854 
5855 	/* Read log header */
5856 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5857 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5858 	type = iwk_mem_read(sc, log_event_table_ptr);
5859 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5860 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5861 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5862 	idx = iwk_mem_read(sc, log_event_table_ptr);
5863 	startptr = log_event_table_ptr +
5864 	    sizeof (uint32_t); /* addr of start of log data */
5865 	if (!log_size & !wraps) {
5866 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5867 		iwk_mac_access_exit(sc);
5868 		return;
5869 	}
5870 
5871 	if (!wraps) {
5872 		num_events = idx;
5873 		logptr = startptr;
5874 	} else {
5875 		num_events = log_size - idx;
5876 		n = type ? 2 : 3;
5877 		logptr = startptr + (idx * n * sizeof (uint32_t));
5878 	}
5879 
5880 	for (i = 0; i < num_events; i++) {
5881 		event_id = iwk_mem_read(sc, logptr);
5882 		logptr += sizeof (uint32_t);
5883 		data1 = iwk_mem_read(sc, logptr);
5884 		logptr += sizeof (uint32_t);
5885 		if (type == 0) { /* no timestamp */
5886 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=%x0x",
5887 			    event_id, data1));
5888 		} else { /* timestamp */
5889 			data2 = iwk_mem_read(sc, logptr);
5890 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5891 			    data1, event_id, data2);
5892 			IWK_DBG((IWK_DEBUG_EEPROM,
5893 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5894 			    data1, event_id, data2));
5895 			logptr += sizeof (uint32_t);
5896 		}
5897 	}
5898 
5899 	/*
5900 	 * Print the wrapped around entries, if any
5901 	 */
5902 	if (wraps) {
5903 		logptr = startptr;
5904 		for (i = 0; i < idx; i++) {
5905 			event_id = iwk_mem_read(sc, logptr);
5906 			logptr += sizeof (uint32_t);
5907 			data1 = iwk_mem_read(sc, logptr);
5908 			logptr += sizeof (uint32_t);
5909 			if (type == 0) { /* no timestamp */
5910 				IWK_DBG((IWK_DEBUG_EEPROM,
5911 				    "Event ID=%d, Data=%x0x", event_id, data1));
5912 			} else { /* timestamp */
5913 				data2 = iwk_mem_read(sc, logptr);
5914 				IWK_DBG((IWK_DEBUG_EEPROM,
5915 				    "Time = %d, Event ID=%d, Data=0x%x\n",
5916 				    data1, event_id, data2));
5917 				logptr += sizeof (uint32_t);
5918 			}
5919 		}
5920 	}
5921 
5922 	iwk_mac_access_exit(sc);
5923 }
5924 
5925 /*
5926  * error_event_table_ptr indicates base of the error log.  This contains
5927  * information about any uCode error that occurs.  For 4965, the format is:
5928  *
5929  * uint32_t valid;        (nonzero) valid, (0) log is empty
5930  * uint32_t error_id;     type of error
5931  * uint32_t pc;           program counter
5932  * uint32_t blink1;       branch link
5933  * uint32_t blink2;       branch link
5934  * uint32_t ilink1;       interrupt link
5935  * uint32_t ilink2;       interrupt link
5936  * uint32_t data1;        error-specific data
5937  * uint32_t data2;        error-specific data
5938  * uint32_t line;         source code line of error
5939  * uint32_t bcon_time;    beacon timer
5940  * uint32_t tsf_low;      network timestamp function timer
5941  * uint32_t tsf_hi;       network timestamp function timer
5942  */
5943 /*
5944  * iwk_write_error_log - Write error log to dmesg
5945  */
5946 static void iwk_write_error_log(iwk_sc_t *sc)
5947 {
5948 	uint32_t err_ptr;	/* Start address of error log */
5949 	uint32_t valid;		/* is error log valid */
5950 
5951 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
5952 	if (!(err_ptr)) {
5953 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
5954 		return;
5955 	}
5956 
5957 	iwk_mac_access_enter(sc);
5958 
5959 	valid = iwk_mem_read(sc, err_ptr);
5960 	if (!(valid)) {
5961 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
5962 		iwk_mac_access_exit(sc);
5963 		return;
5964 	}
5965 	err_ptr += sizeof (uint32_t);
5966 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
5967 	err_ptr += sizeof (uint32_t);
5968 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
5969 	err_ptr += sizeof (uint32_t);
5970 	IWK_DBG((IWK_DEBUG_EEPROM,
5971 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5972 	err_ptr += sizeof (uint32_t);
5973 	IWK_DBG((IWK_DEBUG_EEPROM,
5974 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5975 	err_ptr += sizeof (uint32_t);
5976 	IWK_DBG((IWK_DEBUG_EEPROM,
5977 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5978 	err_ptr += sizeof (uint32_t);
5979 	IWK_DBG((IWK_DEBUG_EEPROM,
5980 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5981 	err_ptr += sizeof (uint32_t);
5982 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
5983 	err_ptr += sizeof (uint32_t);
5984 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
5985 	err_ptr += sizeof (uint32_t);
5986 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
5987 	err_ptr += sizeof (uint32_t);
5988 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
5989 	err_ptr += sizeof (uint32_t);
5990 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
5991 	err_ptr += sizeof (uint32_t);
5992 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
5993 
5994 	iwk_mac_access_exit(sc);
5995 }
5996 
5997 static int
5998 iwk_run_state_config_ibss(ieee80211com_t *ic)
5999 {
6000 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6001 	ieee80211_node_t *in = ic->ic_bss;
6002 	int i, err = IWK_SUCCESS;
6003 
6004 	mutex_enter(&sc->sc_ibss.node_tb_lock);
6005 
6006 	/*
6007 	 * clean all nodes in ibss node table assure be
6008 	 * consistent with hardware
6009 	 */
6010 	for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6011 		sc->sc_ibss.ibss_node_tb[i].used = 0;
6012 		(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6013 		    0,
6014 		    sizeof (iwk_add_sta_t));
6015 	}
6016 
6017 	sc->sc_ibss.node_number = 0;
6018 
6019 	mutex_exit(&sc->sc_ibss.node_tb_lock);
6020 
6021 	/*
6022 	 * configure RX and TX
6023 	 */
6024 	sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
6025 
6026 	sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6027 	sc->sc_config.filter_flags =
6028 	    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
6029 	    RXON_FILTER_DIS_DECRYPT_MSK |
6030 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
6031 
6032 	sc->sc_config.assoc_id = 0;
6033 
6034 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
6035 	sc->sc_config.chan = ieee80211_chan2ieee(ic,
6036 	    in->in_chan);
6037 
6038 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
6039 		sc->sc_config.cck_basic_rates = 0x03;
6040 		sc->sc_config.ofdm_basic_rates = 0;
6041 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
6042 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
6043 		sc->sc_config.cck_basic_rates = 0;
6044 		sc->sc_config.ofdm_basic_rates = 0x15;
6045 
6046 	} else {
6047 		sc->sc_config.cck_basic_rates = 0x0f;
6048 		sc->sc_config.ofdm_basic_rates = 0xff;
6049 	}
6050 
6051 	sc->sc_config.flags &=
6052 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6053 	    RXON_FLG_SHORT_SLOT_MSK);
6054 
6055 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
6056 		sc->sc_config.flags |=
6057 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6058 	}
6059 
6060 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
6061 		sc->sc_config.flags |=
6062 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6063 	}
6064 
6065 	sc->sc_config.filter_flags |=
6066 	    LE_32(RXON_FILTER_ASSOC_MSK);
6067 
6068 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6069 	    sizeof (iwk_rxon_cmd_t), 1);
6070 	if (err != IWK_SUCCESS) {
6071 		cmn_err(CE_WARN, "iwk_run_state_config_ibss(): "
6072 		    "failed to update configuration.\n");
6073 		return (err);
6074 	}
6075 
6076 	return (err);
6077 
6078 }
6079 
6080 static int
6081 iwk_run_state_config_sta(ieee80211com_t *ic)
6082 {
6083 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6084 	ieee80211_node_t *in = ic->ic_bss;
6085 	int err = IWK_SUCCESS;
6086 
6087 	/* update adapter's configuration */
6088 	if (sc->sc_assoc_id != in->in_associd) {
6089 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6090 		    "associate ID mismatch: expected %d, "
6091 		    "got %d\n",
6092 		    in->in_associd, sc->sc_assoc_id);
6093 	}
6094 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
6095 
6096 	/*
6097 	 * short preamble/slot time are
6098 	 * negotiated when associating
6099 	 */
6100 	sc->sc_config.flags &=
6101 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6102 	    RXON_FLG_SHORT_SLOT_MSK);
6103 
6104 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
6105 		sc->sc_config.flags |=
6106 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6107 
6108 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6109 		sc->sc_config.flags |=
6110 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6111 
6112 	sc->sc_config.filter_flags |=
6113 	    LE_32(RXON_FILTER_ASSOC_MSK);
6114 
6115 	if (ic->ic_opmode != IEEE80211_M_STA)
6116 		sc->sc_config.filter_flags |=
6117 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
6118 
6119 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
6120 	    " filter_flags %x\n",
6121 	    sc->sc_config.chan, sc->sc_config.flags,
6122 	    sc->sc_config.filter_flags));
6123 
6124 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6125 	    sizeof (iwk_rxon_cmd_t), 1);
6126 	if (err != IWK_SUCCESS) {
6127 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6128 		    "failed to update configuration\n");
6129 		return (err);
6130 	}
6131 
6132 	return (err);
6133 }
6134 
6135 static int
6136 iwk_start_tx_beacon(ieee80211com_t *ic)
6137 {
6138 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6139 	ieee80211_node_t *in = ic->ic_bss;
6140 	int err = IWK_SUCCESS;
6141 	iwk_tx_beacon_cmd_t  *tx_beacon_p;
6142 	uint16_t  masks = 0;
6143 	mblk_t *mp;
6144 	int rate;
6145 
6146 	/*
6147 	 * allocate and transmit beacon frames
6148 	 */
6149 	tx_beacon_p = &sc->sc_ibss.ibss_beacon.beacon_cmd;
6150 
6151 	(void) memset(tx_beacon_p, 0,
6152 	    sizeof (iwk_tx_beacon_cmd_t));
6153 	rate = 0;
6154 	masks = 0;
6155 
6156 	tx_beacon_p->config.sta_id = IWK_BROADCAST_ID;
6157 	tx_beacon_p->config.stop_time.life_time =
6158 	    LE_32(0xffffffff);
6159 
6160 	if (sc->sc_ibss.ibss_beacon.mp != NULL) {
6161 		freemsg(sc->sc_ibss.ibss_beacon.mp);
6162 		sc->sc_ibss.ibss_beacon.mp = NULL;
6163 	}
6164 
6165 	sc->sc_ibss.ibss_beacon.mp =
6166 	    ieee80211_beacon_alloc(ic, in,
6167 	    &sc->sc_ibss.ibss_beacon.iwk_boff);
6168 	if (sc->sc_ibss.ibss_beacon.mp == NULL) {
6169 		cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6170 		    "failed to get beacon frame.\n");
6171 		return (IWK_FAIL);
6172 	}
6173 
6174 	mp = sc->sc_ibss.ibss_beacon.mp;
6175 
6176 	ASSERT(mp->b_cont == NULL);
6177 
6178 	bcopy(mp->b_rptr, tx_beacon_p->bcon_frame, MBLKL(mp));
6179 
6180 	tx_beacon_p->config.len = (uint16_t)(MBLKL(mp));
6181 	sc->sc_ibss.ibss_beacon.beacon_cmd_len =
6182 	    sizeof (iwk_tx_cmd_t) +
6183 	    4 + tx_beacon_p->config.len;
6184 
6185 	/*
6186 	 * beacons are sent at 1M
6187 	 */
6188 	rate = in->in_rates.ir_rates[0];
6189 	rate &= IEEE80211_RATE_VAL;
6190 
6191 	if (2 == rate || 4 == rate || 11 == rate ||
6192 	    22 == rate) {
6193 		masks |= RATE_MCS_CCK_MSK;
6194 	}
6195 
6196 	masks |= RATE_MCS_ANT_B_MSK;
6197 
6198 	tx_beacon_p->config.rate.r.rate_n_flags =
6199 	    (iwk_rate_to_plcp(rate) | masks);
6200 
6201 
6202 	tx_beacon_p->config.tx_flags =
6203 	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
6204 
6205 	if (ic->ic_bss->in_tstamp.tsf != 0) {
6206 		sc->sc_ibss.ibss_beacon.syncbeacon = 1;
6207 	} else {
6208 		if (ieee80211_beacon_update(ic, in,
6209 		    &sc->sc_ibss.ibss_beacon.iwk_boff,
6210 		    mp, 0)) {
6211 			bcopy(mp->b_rptr,
6212 			    tx_beacon_p->bcon_frame,
6213 			    MBLKL(mp));
6214 		}
6215 
6216 		err = iwk_cmd(sc, REPLY_TX_BEACON,
6217 		    tx_beacon_p,
6218 		    sc->sc_ibss.ibss_beacon.beacon_cmd_len,
6219 		    1);
6220 		if (err != IWK_SUCCESS) {
6221 			cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6222 			    "failed to TX beacon.\n");
6223 			return (err);
6224 		}
6225 
6226 		sc->sc_ibss.ibss_beacon.syncbeacon = 0;
6227 	}
6228 
6229 	return (err);
6230 }
6231 
6232 static int
6233 iwk_clean_add_node_ibss(struct ieee80211com *ic,
6234     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2)
6235 {
6236 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6237 	uint8_t	index;
6238 	iwk_add_sta_t bc_node;
6239 	iwk_link_quality_cmd_t bc_link_quality;
6240 	iwk_link_quality_cmd_t link_quality;
6241 	uint16_t  bc_masks = 0;
6242 	uint16_t  masks = 0;
6243 	int i, rate;
6244 	struct ieee80211_rateset rs;
6245 	iwk_ibss_node_t *ibss_node_p;
6246 	int err = IWK_SUCCESS;
6247 
6248 	/*
6249 	 * find a location that is not
6250 	 * used in ibss node table
6251 	 */
6252 	for (index = IWK_STA_ID;
6253 	    index < IWK_STATION_COUNT; index++) {
6254 		if (!sc->sc_ibss.ibss_node_tb[index].used) {
6255 			break;
6256 		}
6257 	}
6258 
6259 	/*
6260 	 * if have too many nodes in hardware, clean up
6261 	 */
6262 	if (index < IWK_BROADCAST_ID &&
6263 	    sc->sc_ibss.node_number >= 25) {
6264 		if (iwk_cmd(sc, REPLY_REMOVE_ALL_STA,
6265 		    NULL, 0, 1) != IWK_SUCCESS) {
6266 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6267 			    "failed to remove all nodes in hardware\n");
6268 			return (IWK_FAIL);
6269 		}
6270 
6271 		for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6272 			sc->sc_ibss.ibss_node_tb[i].used = 0;
6273 			(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6274 			    0, sizeof (iwk_add_sta_t));
6275 		}
6276 
6277 		sc->sc_ibss.node_number = 0;
6278 
6279 		/*
6280 		 * add broadcast node so that we
6281 		 * can send broadcast frame
6282 		 */
6283 		(void) memset(&bc_node, 0, sizeof (bc_node));
6284 		(void) memset(bc_node.bssid, 0xff, 6);
6285 		bc_node.id = IWK_BROADCAST_ID;
6286 
6287 		err = iwk_cmd(sc, REPLY_ADD_STA, &bc_node, sizeof (bc_node), 1);
6288 		if (err != IWK_SUCCESS) {
6289 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6290 		    "failed to add broadcast node\n");
6291 		return (err);
6292 		}
6293 
6294 		/* TX_LINK_QUALITY cmd */
6295 		(void) memset(&bc_link_quality, 0, sizeof (bc_link_quality));
6296 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6297 			bc_masks |= RATE_MCS_CCK_MSK;
6298 			bc_masks |= RATE_MCS_ANT_B_MSK;
6299 			bc_masks &= ~RATE_MCS_ANT_A_MSK;
6300 			bc_link_quality.rate_n_flags[i] =
6301 			    iwk_rate_to_plcp(2) | bc_masks;
6302 		}
6303 
6304 		bc_link_quality.general_params.single_stream_ant_msk = 2;
6305 		bc_link_quality.general_params.dual_stream_ant_msk = 3;
6306 		bc_link_quality.agg_params.agg_dis_start_th = 3;
6307 		bc_link_quality.agg_params.agg_time_limit = LE_16(4000);
6308 		bc_link_quality.sta_id = IWK_BROADCAST_ID;
6309 
6310 		err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6311 		    &bc_link_quality, sizeof (bc_link_quality), 1);
6312 		if (err != IWK_SUCCESS) {
6313 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6314 			    "failed to config link quality table\n");
6315 			return (err);
6316 		}
6317 	}
6318 
6319 	if (index >= IWK_BROADCAST_ID) {
6320 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6321 		    "the count of node in hardware is too much\n");
6322 		return (IWK_FAIL);
6323 	}
6324 
6325 	/*
6326 	 * add a node into hardware
6327 	 */
6328 	ibss_node_p = &sc->sc_ibss.ibss_node_tb[index];
6329 
6330 	ibss_node_p->used = 1;
6331 
6332 	(void) memset(&ibss_node_p->node, 0,
6333 	    sizeof (iwk_add_sta_t));
6334 
6335 	IEEE80211_ADDR_COPY(ibss_node_p->node.bssid, addr);
6336 	ibss_node_p->node.id = index;
6337 	ibss_node_p->node.control = 0;
6338 	ibss_node_p->node.flags = 0;
6339 
6340 	err = iwk_cmd(sc, REPLY_ADD_STA, &ibss_node_p->node,
6341 	    sizeof (iwk_add_sta_t), 1);
6342 	if (err != IWK_SUCCESS) {
6343 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6344 		    "failed to add IBSS node\n");
6345 		ibss_node_p->used = 0;
6346 		(void) memset(&ibss_node_p->node, 0,
6347 		    sizeof (iwk_add_sta_t));
6348 		return (err);
6349 	}
6350 
6351 	sc->sc_ibss.node_number++;
6352 
6353 	(void) memset(&link_quality, 0, sizeof (link_quality));
6354 
6355 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic,
6356 	    ic->ic_curchan)];
6357 
6358 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6359 		if (i < rs.ir_nrates) {
6360 			rate = rs.
6361 			    ir_rates[rs.ir_nrates - i];
6362 		} else {
6363 			rate = 2;
6364 		}
6365 
6366 		if (2 == rate || 4 == rate ||
6367 		    11 == rate || 22 == rate) {
6368 			masks |= RATE_MCS_CCK_MSK;
6369 		}
6370 
6371 		masks |= RATE_MCS_ANT_B_MSK;
6372 		masks &= ~RATE_MCS_ANT_A_MSK;
6373 
6374 		link_quality.rate_n_flags[i] =
6375 		    iwk_rate_to_plcp(rate) | masks;
6376 	}
6377 
6378 	link_quality.general_params.single_stream_ant_msk = 2;
6379 	link_quality.general_params.dual_stream_ant_msk = 3;
6380 	link_quality.agg_params.agg_dis_start_th = 3;
6381 	link_quality.agg_params.agg_time_limit = LE_16(4000);
6382 	link_quality.sta_id = ibss_node_p->node.id;
6383 
6384 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6385 	    &link_quality, sizeof (link_quality), 1);
6386 	if (err != IWK_SUCCESS) {
6387 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6388 		    "failed to set up TX link quality\n");
6389 		ibss_node_p->used = 0;
6390 		(void) memset(ibss_node_p->node.bssid, 0, 6);
6391 		return (err);
6392 	}
6393 
6394 	*index2 = index;
6395 
6396 	return (err);
6397 }
6398