xref: /titanic_41/usr/src/uts/common/io/iwk/iwk2.c (revision 2e02daeede04af58a9d4f18f8dfed1fda3ececa1)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
88 
89 static void	*iwk_soft_state_p = NULL;
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * the maximum number of segments is 4 for the hardware.
177  * now all the wifi drivers put the whole frame in a single
178  * descriptor, so we define the maximum  number of segments 1,
179  * just the same as the rx_buffer. we consider leverage the HW
180  * ability in the future, that is why we don't define rx and tx
181  * buffer_dma_attr as the same.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
197 
198 /* DMA attributes for text and data part in the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 static void	iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
312     struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp);
313 
314 static void	iwk_write_event_log(iwk_sc_t *);
315 static void	iwk_write_error_log(iwk_sc_t *);
316 
317 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
318 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
319 static int	iwk_quiesce(dev_info_t *dip);
320 
321 /*
322  * GLD specific operations
323  */
324 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
325 static int	iwk_m_start(void *arg);
326 static void	iwk_m_stop(void *arg);
327 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
328 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
329 static int	iwk_m_promisc(void *arg, boolean_t on);
330 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
331 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
332 static int	iwk_m_setprop(void *arg, const char *pr_name,
333 	mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
334 static int	iwk_m_getprop(void *arg, const char *pr_name,
335 	mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
336 	void *wldp_buf, uint_t *perm);
337 static void	iwk_destroy_locks(iwk_sc_t *sc);
338 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
339 static void	iwk_thread(iwk_sc_t *sc);
340 static void	iwk_watchdog(void *arg);
341 static int	iwk_run_state_config_ibss(ieee80211com_t *ic);
342 static int	iwk_run_state_config_sta(ieee80211com_t *ic);
343 static int	iwk_start_tx_beacon(ieee80211com_t *ic);
344 static int	iwk_clean_add_node_ibss(struct ieee80211com *ic,
345     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2);
346 
347 /*
348  * Supported rates for 802.11b/g modes (in 500Kbps unit).
349  * 11a and 11n support will be added later.
350  */
351 static const struct ieee80211_rateset iwk_rateset_11b =
352 	{ 4, { 2, 4, 11, 22 } };
353 
354 static const struct ieee80211_rateset iwk_rateset_11g =
355 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
356 
357 /*
358  * For mfthread only
359  */
360 extern pri_t minclsyspri;
361 
362 #define	DRV_NAME_4965	"iwk"
363 
364 /*
365  * Module Loading Data & Entry Points
366  */
367 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
368     iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
369 
370 static struct modldrv iwk_modldrv = {
371 	&mod_driverops,
372 	"Intel(R) 4965AGN driver(N)",
373 	&iwk_devops
374 };
375 
376 static struct modlinkage iwk_modlinkage = {
377 	MODREV_1,
378 	&iwk_modldrv,
379 	NULL
380 };
381 
382 int
383 _init(void)
384 {
385 	int	status;
386 
387 	status = ddi_soft_state_init(&iwk_soft_state_p,
388 	    sizeof (iwk_sc_t), 1);
389 	if (status != DDI_SUCCESS)
390 		return (status);
391 
392 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
393 	status = mod_install(&iwk_modlinkage);
394 	if (status != DDI_SUCCESS) {
395 		mac_fini_ops(&iwk_devops);
396 		ddi_soft_state_fini(&iwk_soft_state_p);
397 	}
398 
399 	return (status);
400 }
401 
402 int
403 _fini(void)
404 {
405 	int status;
406 
407 	status = mod_remove(&iwk_modlinkage);
408 	if (status == DDI_SUCCESS) {
409 		mac_fini_ops(&iwk_devops);
410 		ddi_soft_state_fini(&iwk_soft_state_p);
411 	}
412 
413 	return (status);
414 }
415 
416 int
417 _info(struct modinfo *mip)
418 {
419 	return (mod_info(&iwk_modlinkage, mip));
420 }
421 
422 /*
423  * Mac Call Back entries
424  */
425 mac_callbacks_t	iwk_m_callbacks = {
426 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
427 	iwk_m_stat,
428 	iwk_m_start,
429 	iwk_m_stop,
430 	iwk_m_promisc,
431 	iwk_m_multicst,
432 	iwk_m_unicst,
433 	iwk_m_tx,
434 	iwk_m_ioctl,
435 	NULL,
436 	NULL,
437 	NULL,
438 	iwk_m_setprop,
439 	iwk_m_getprop
440 };
441 
442 #ifdef DEBUG
443 void
444 iwk_dbg(uint32_t flags, const char *fmt, ...)
445 {
446 	va_list	ap;
447 
448 	if (flags & iwk_dbg_flags) {
449 		va_start(ap, fmt);
450 		vcmn_err(CE_NOTE, fmt, ap);
451 		va_end(ap);
452 	}
453 }
454 #endif
455 
456 /*
457  * device operations
458  */
459 int
460 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
461 {
462 	iwk_sc_t		*sc;
463 	ieee80211com_t	*ic;
464 	int			instance, err, i;
465 	char			strbuf[32];
466 	wifi_data_t		wd = { 0 };
467 	mac_register_t		*macp;
468 
469 	int			intr_type;
470 	int			intr_count;
471 	int			intr_actual;
472 
473 	switch (cmd) {
474 	case DDI_ATTACH:
475 		break;
476 	case DDI_RESUME:
477 		sc = ddi_get_soft_state(iwk_soft_state_p,
478 		    ddi_get_instance(dip));
479 		ASSERT(sc != NULL);
480 		if (sc->sc_flags & IWK_F_RUNNING)
481 			(void) iwk_init(sc);
482 
483 		mutex_enter(&sc->sc_glock);
484 		sc->sc_flags &= ~IWK_F_SUSPEND;
485 		sc->sc_flags |= IWK_F_LAZY_RESUME;
486 		mutex_exit(&sc->sc_glock);
487 
488 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
489 		return (DDI_SUCCESS);
490 	default:
491 		err = DDI_FAILURE;
492 		goto attach_fail1;
493 	}
494 
495 	instance = ddi_get_instance(dip);
496 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
497 	if (err != DDI_SUCCESS) {
498 		cmn_err(CE_WARN,
499 		    "iwk_attach(): failed to allocate soft state\n");
500 		goto attach_fail1;
501 	}
502 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
503 	sc->sc_dip = dip;
504 
505 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
506 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
507 	if (err != DDI_SUCCESS) {
508 		cmn_err(CE_WARN,
509 		    "iwk_attach(): failed to map config spaces regs\n");
510 		goto attach_fail2;
511 	}
512 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
513 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
514 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
515 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
516 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
517 	if (!sc->sc_clsz)
518 		sc->sc_clsz = 16;
519 	sc->sc_clsz = (sc->sc_clsz << 2);
520 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
521 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
522 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
523 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
524 	/*
525 	 * Map operating registers
526 	 */
527 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
528 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
529 	if (err != DDI_SUCCESS) {
530 		cmn_err(CE_WARN,
531 		    "iwk_attach(): failed to map device regs\n");
532 		goto attach_fail2a;
533 	}
534 
535 	err = ddi_intr_get_supported_types(dip, &intr_type);
536 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
537 		cmn_err(CE_WARN, "iwk_attach(): "
538 		    "Fixed type interrupt is not supported\n");
539 		goto attach_fail_intr_a;
540 	}
541 
542 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
543 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
544 		cmn_err(CE_WARN, "iwk_attach(): "
545 		    "No fixed interrupts\n");
546 		goto attach_fail_intr_a;
547 	}
548 
549 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
550 
551 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
552 	    intr_count, &intr_actual, 0);
553 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
554 		cmn_err(CE_WARN, "iwk_attach(): "
555 		    "ddi_intr_alloc() failed 0x%x\n", err);
556 		goto attach_fail_intr_b;
557 	}
558 
559 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
560 	if (err != DDI_SUCCESS) {
561 		cmn_err(CE_WARN, "iwk_attach(): "
562 		    "ddi_intr_get_pri() failed 0x%x\n", err);
563 		goto attach_fail_intr_c;
564 	}
565 
566 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
567 	    DDI_INTR_PRI(sc->sc_intr_pri));
568 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
569 	    DDI_INTR_PRI(sc->sc_intr_pri));
570 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
571 	    DDI_INTR_PRI(sc->sc_intr_pri));
572 	mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER,
573 	    DDI_INTR_PRI(sc->sc_intr_pri));
574 
575 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
576 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
577 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
578 	/*
579 	 * initialize the mfthread
580 	 */
581 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
582 	sc->sc_mf_thread = NULL;
583 	sc->sc_mf_thread_switch = 0;
584 
585 	/*
586 	 * Allocate shared page.
587 	 */
588 	err = iwk_alloc_shared(sc);
589 	if (err != DDI_SUCCESS) {
590 		cmn_err(CE_WARN, "iwk_attach(): "
591 		    "failed to allocate shared page\n");
592 		goto attach_fail3;
593 	}
594 
595 	/*
596 	 * Allocate keep warm page.
597 	 */
598 	err = iwk_alloc_kw(sc);
599 	if (err != DDI_SUCCESS) {
600 		cmn_err(CE_WARN, "iwk_attach(): "
601 		    "failed to allocate keep warm page\n");
602 		goto attach_fail3a;
603 	}
604 
605 	/*
606 	 * Do some necessary hardware initializations.
607 	 */
608 	err = iwk_preinit(sc);
609 	if (err != DDI_SUCCESS) {
610 		cmn_err(CE_WARN, "iwk_attach(): "
611 		    "failed to init hardware\n");
612 		goto attach_fail4;
613 	}
614 
615 	/* initialize EEPROM */
616 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
617 	if (err != 0) {
618 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
619 		goto attach_fail4;
620 	}
621 
622 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
623 		cmn_err(CE_WARN, "older EEPROM detected\n");
624 		goto attach_fail4;
625 	}
626 
627 	iwk_get_mac_from_eep(sc);
628 
629 	err = iwk_ring_init(sc);
630 	if (err != DDI_SUCCESS) {
631 		cmn_err(CE_WARN, "iwk_attach(): "
632 		    "failed to allocate and initialize ring\n");
633 		goto attach_fail4;
634 	}
635 
636 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
637 
638 	err = iwk_alloc_fw_dma(sc);
639 	if (err != DDI_SUCCESS) {
640 		cmn_err(CE_WARN, "iwk_attach(): "
641 		    "failed to allocate firmware dma\n");
642 		goto attach_fail5;
643 	}
644 
645 	/*
646 	 * Initialize the wifi part, which will be used by
647 	 * generic layer
648 	 */
649 	ic = &sc->sc_ic;
650 	ic->ic_phytype  = IEEE80211_T_OFDM;
651 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
652 	ic->ic_state    = IEEE80211_S_INIT;
653 	ic->ic_maxrssi  = 100; /* experimental number */
654 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
655 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
656 	/*
657 	 * use software WEP and TKIP, hardware CCMP;
658 	 */
659 	ic->ic_caps |= IEEE80211_C_AES_CCM;
660 	/*
661 	 * Support WPA/WPA2
662 	 */
663 	ic->ic_caps |= IEEE80211_C_WPA;
664 	/*
665 	 * support Adhoc mode
666 	 */
667 	ic->ic_caps |= IEEE80211_C_IBSS;
668 
669 	/* set supported .11b and .11g rates */
670 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
671 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
672 
673 	/* set supported .11b and .11g channels (1 through 11) */
674 	for (i = 1; i <= 11; i++) {
675 		ic->ic_sup_channels[i].ich_freq =
676 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
677 		ic->ic_sup_channels[i].ich_flags =
678 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
679 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
680 		    IEEE80211_CHAN_PASSIVE;
681 	}
682 	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
683 
684 	ic->ic_xmit = iwk_send;
685 	/*
686 	 * init Wifi layer
687 	 */
688 	ieee80211_attach(ic);
689 
690 	/*
691 	 * different instance has different WPA door
692 	 */
693 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
694 	    ddi_driver_name(dip),
695 	    ddi_get_instance(dip));
696 
697 	/*
698 	 * Override 80211 default routines
699 	 */
700 	sc->sc_newstate = ic->ic_newstate;
701 	ic->ic_newstate = iwk_newstate;
702 	ic->ic_watchdog = iwk_watchdog;
703 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
704 	ic->ic_recv_mgmt = iwk_recv_mgmt;
705 	ic->ic_node_alloc = iwk_node_alloc;
706 	ic->ic_node_free = iwk_node_free;
707 	ic->ic_crypto.cs_key_set = iwk_key_set;
708 	ieee80211_media_init(ic);
709 	/*
710 	 * initialize default tx key
711 	 */
712 	ic->ic_def_txkey = 0;
713 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
714 	    iwk_rx_softintr, (caddr_t)sc);
715 	if (err != DDI_SUCCESS) {
716 		cmn_err(CE_WARN, "iwk_attach(): "
717 		    "add soft interrupt failed\n");
718 		goto attach_fail7;
719 	}
720 
721 	/*
722 	 * Add the interrupt handler
723 	 */
724 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
725 	    (caddr_t)sc, NULL);
726 	if (err != DDI_SUCCESS) {
727 		cmn_err(CE_WARN, "iwk_attach(): "
728 		    "ddi_intr_add_handle() failed\n");
729 		goto attach_fail8;
730 	}
731 
732 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
733 	if (err != DDI_SUCCESS) {
734 		cmn_err(CE_WARN, "iwk_attach(): "
735 		    "ddi_intr_enable() failed\n");
736 		goto attach_fail_intr_d;
737 	}
738 
739 	/*
740 	 * Initialize pointer to device specific functions
741 	 */
742 	wd.wd_secalloc = WIFI_SEC_NONE;
743 	wd.wd_opmode = ic->ic_opmode;
744 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
745 
746 	macp = mac_alloc(MAC_VERSION);
747 	if (macp == NULL) {
748 		cmn_err(CE_WARN,
749 		    "iwk_attach(): failed to do mac_alloc()\n");
750 		goto attach_fail9;
751 	}
752 
753 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
754 	macp->m_driver		= sc;
755 	macp->m_dip		= dip;
756 	macp->m_src_addr	= ic->ic_macaddr;
757 	macp->m_callbacks	= &iwk_m_callbacks;
758 	macp->m_min_sdu		= 0;
759 	macp->m_max_sdu		= IEEE80211_MTU;
760 	macp->m_pdata		= &wd;
761 	macp->m_pdata_size	= sizeof (wd);
762 
763 	/*
764 	 * Register the macp to mac
765 	 */
766 	err = mac_register(macp, &ic->ic_mach);
767 	mac_free(macp);
768 	if (err != DDI_SUCCESS) {
769 		cmn_err(CE_WARN,
770 		    "iwk_attach(): failed to do mac_register()\n");
771 		goto attach_fail9;
772 	}
773 
774 	/*
775 	 * Create minor node of type DDI_NT_NET_WIFI
776 	 */
777 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
778 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
779 	    instance + 1, DDI_NT_NET_WIFI, 0);
780 	if (err != DDI_SUCCESS)
781 		cmn_err(CE_WARN,
782 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
783 
784 	/*
785 	 * Notify link is down now
786 	 */
787 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
788 
789 	/*
790 	 * create the mf thread to handle the link status,
791 	 * recovery fatal error, etc.
792 	 */
793 	sc->sc_mf_thread_switch = 1;
794 	if (sc->sc_mf_thread == NULL)
795 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
796 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
797 
798 	sc->sc_flags |= IWK_F_ATTACHED;
799 
800 	return (DDI_SUCCESS);
801 attach_fail9:
802 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
803 attach_fail_intr_d:
804 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
805 
806 attach_fail8:
807 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
808 	sc->sc_soft_hdl = NULL;
809 attach_fail7:
810 	ieee80211_detach(ic);
811 attach_fail6:
812 	iwk_free_fw_dma(sc);
813 attach_fail5:
814 	iwk_ring_free(sc);
815 attach_fail4:
816 	iwk_free_kw(sc);
817 attach_fail3a:
818 	iwk_free_shared(sc);
819 attach_fail3:
820 	iwk_destroy_locks(sc);
821 attach_fail_intr_c:
822 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
823 attach_fail_intr_b:
824 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
825 attach_fail_intr_a:
826 	ddi_regs_map_free(&sc->sc_handle);
827 attach_fail2a:
828 	ddi_regs_map_free(&sc->sc_cfg_handle);
829 attach_fail2:
830 	ddi_soft_state_free(iwk_soft_state_p, instance);
831 attach_fail1:
832 	return (err);
833 }
834 
835 int
836 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
837 {
838 	iwk_sc_t	*sc;
839 	int err;
840 
841 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
842 	ASSERT(sc != NULL);
843 
844 	switch (cmd) {
845 	case DDI_DETACH:
846 		break;
847 	case DDI_SUSPEND:
848 		mutex_enter(&sc->sc_glock);
849 		sc->sc_flags |= IWK_F_SUSPEND;
850 		mutex_exit(&sc->sc_glock);
851 		if (sc->sc_flags & IWK_F_RUNNING) {
852 			iwk_stop(sc);
853 		}
854 
855 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
856 		return (DDI_SUCCESS);
857 	default:
858 		return (DDI_FAILURE);
859 	}
860 
861 	if (!(sc->sc_flags & IWK_F_ATTACHED))
862 		return (DDI_FAILURE);
863 
864 	err = mac_disable(sc->sc_ic.ic_mach);
865 	if (err != DDI_SUCCESS)
866 		return (err);
867 
868 	/*
869 	 * Destroy the mf_thread
870 	 */
871 	mutex_enter(&sc->sc_mt_lock);
872 	sc->sc_mf_thread_switch = 0;
873 	while (sc->sc_mf_thread != NULL) {
874 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
875 			break;
876 	}
877 	mutex_exit(&sc->sc_mt_lock);
878 
879 	iwk_stop(sc);
880 	DELAY(500000);
881 
882 	/*
883 	 * Unregiste from the MAC layer subsystem
884 	 */
885 	(void) mac_unregister(sc->sc_ic.ic_mach);
886 
887 	mutex_enter(&sc->sc_glock);
888 	iwk_free_fw_dma(sc);
889 	iwk_ring_free(sc);
890 	iwk_free_kw(sc);
891 	iwk_free_shared(sc);
892 	mutex_exit(&sc->sc_glock);
893 
894 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
895 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
896 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
897 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
898 
899 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
900 	sc->sc_soft_hdl = NULL;
901 
902 	/*
903 	 * detach ieee80211
904 	 */
905 	ieee80211_detach(&sc->sc_ic);
906 
907 	iwk_destroy_locks(sc);
908 
909 	ddi_regs_map_free(&sc->sc_handle);
910 	ddi_regs_map_free(&sc->sc_cfg_handle);
911 	ddi_remove_minor_node(dip, NULL);
912 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
913 
914 	return (DDI_SUCCESS);
915 }
916 
917 /*
918  * quiesce(9E) entry point.
919  *
920  * This function is called when the system is single-threaded at high
921  * PIL with preemption disabled. Therefore, this function must not be
922  * blocked.
923  *
924  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
925  * DDI_FAILURE indicates an error condition and should almost never happen.
926  */
927 int
928 iwk_quiesce(dev_info_t *dip)
929 {
930 	iwk_sc_t	*sc;
931 
932 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
933 	ASSERT(sc != NULL);
934 
935 	/* no message prints and no lock accquisition */
936 #ifdef DEBUG
937 	iwk_dbg_flags = 0;
938 #endif
939 	sc->sc_flags |= IWK_F_QUIESCED;
940 
941 	iwk_stop(sc);
942 
943 	return (DDI_SUCCESS);
944 }
945 
946 static void
947 iwk_destroy_locks(iwk_sc_t *sc)
948 {
949 	cv_destroy(&sc->sc_mt_cv);
950 	mutex_destroy(&sc->sc_mt_lock);
951 	cv_destroy(&sc->sc_tx_cv);
952 	cv_destroy(&sc->sc_cmd_cv);
953 	cv_destroy(&sc->sc_fw_cv);
954 	mutex_destroy(&sc->sc_tx_lock);
955 	mutex_destroy(&sc->sc_glock);
956 }
957 
958 /*
959  * Allocate an area of memory and a DMA handle for accessing it
960  */
961 static int
962 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
963     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
964     uint_t dma_flags, iwk_dma_t *dma_p)
965 {
966 	caddr_t vaddr;
967 	int err;
968 
969 	/*
970 	 * Allocate handle
971 	 */
972 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
973 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
974 	if (err != DDI_SUCCESS) {
975 		dma_p->dma_hdl = NULL;
976 		return (DDI_FAILURE);
977 	}
978 
979 	/*
980 	 * Allocate memory
981 	 */
982 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
983 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
984 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
985 	if (err != DDI_SUCCESS) {
986 		ddi_dma_free_handle(&dma_p->dma_hdl);
987 		dma_p->dma_hdl = NULL;
988 		dma_p->acc_hdl = NULL;
989 		return (DDI_FAILURE);
990 	}
991 
992 	/*
993 	 * Bind the two together
994 	 */
995 	dma_p->mem_va = vaddr;
996 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
997 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
998 	    &dma_p->cookie, &dma_p->ncookies);
999 	if (err != DDI_DMA_MAPPED) {
1000 		ddi_dma_mem_free(&dma_p->acc_hdl);
1001 		ddi_dma_free_handle(&dma_p->dma_hdl);
1002 		dma_p->acc_hdl = NULL;
1003 		dma_p->dma_hdl = NULL;
1004 		return (DDI_FAILURE);
1005 	}
1006 
1007 	dma_p->nslots = ~0U;
1008 	dma_p->size = ~0U;
1009 	dma_p->token = ~0U;
1010 	dma_p->offset = 0;
1011 	return (DDI_SUCCESS);
1012 }
1013 
1014 /*
1015  * Free one allocated area of DMAable memory
1016  */
1017 static void
1018 iwk_free_dma_mem(iwk_dma_t *dma_p)
1019 {
1020 	if (dma_p->dma_hdl != NULL) {
1021 		if (dma_p->ncookies) {
1022 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1023 			dma_p->ncookies = 0;
1024 		}
1025 		ddi_dma_free_handle(&dma_p->dma_hdl);
1026 		dma_p->dma_hdl = NULL;
1027 	}
1028 
1029 	if (dma_p->acc_hdl != NULL) {
1030 		ddi_dma_mem_free(&dma_p->acc_hdl);
1031 		dma_p->acc_hdl = NULL;
1032 	}
1033 }
1034 
1035 /*
1036  *
1037  */
1038 static int
1039 iwk_alloc_fw_dma(iwk_sc_t *sc)
1040 {
1041 	int err = DDI_SUCCESS;
1042 	iwk_dma_t *dma_p;
1043 	char *t;
1044 
1045 	/*
1046 	 * firmware image layout:
1047 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1048 	 */
1049 	t = (char *)(sc->sc_hdr + 1);
1050 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1051 	    &fw_dma_attr, &iwk_dma_accattr,
1052 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1053 	    &sc->sc_dma_fw_text);
1054 	dma_p = &sc->sc_dma_fw_text;
1055 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1056 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1057 	    dma_p->cookie.dmac_size));
1058 	if (err != DDI_SUCCESS) {
1059 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1060 		    " text dma memory");
1061 		goto fail;
1062 	}
1063 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1064 
1065 	t += LE_32(sc->sc_hdr->textsz);
1066 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1067 	    &fw_dma_attr, &iwk_dma_accattr,
1068 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1069 	    &sc->sc_dma_fw_data);
1070 	dma_p = &sc->sc_dma_fw_data;
1071 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1072 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1073 	    dma_p->cookie.dmac_size));
1074 	if (err != DDI_SUCCESS) {
1075 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1076 		    " data dma memory");
1077 		goto fail;
1078 	}
1079 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1080 
1081 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1082 	    &fw_dma_attr, &iwk_dma_accattr,
1083 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1084 	    &sc->sc_dma_fw_data_bak);
1085 	dma_p = &sc->sc_dma_fw_data_bak;
1086 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1087 	    "size:%lx]\n",
1088 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1089 	    dma_p->cookie.dmac_size));
1090 	if (err != DDI_SUCCESS) {
1091 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1092 		    " data bakeup dma memory");
1093 		goto fail;
1094 	}
1095 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1096 
1097 	t += LE_32(sc->sc_hdr->datasz);
1098 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1099 	    &fw_dma_attr, &iwk_dma_accattr,
1100 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1101 	    &sc->sc_dma_fw_init_text);
1102 	dma_p = &sc->sc_dma_fw_init_text;
1103 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1104 	    "size:%lx]\n",
1105 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1106 	    dma_p->cookie.dmac_size));
1107 	if (err != DDI_SUCCESS) {
1108 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1109 		    "init text dma memory");
1110 		goto fail;
1111 	}
1112 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1113 
1114 	t += LE_32(sc->sc_hdr->init_textsz);
1115 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1116 	    &fw_dma_attr, &iwk_dma_accattr,
1117 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1118 	    &sc->sc_dma_fw_init_data);
1119 	dma_p = &sc->sc_dma_fw_init_data;
1120 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1121 	    "size:%lx]\n",
1122 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1123 	    dma_p->cookie.dmac_size));
1124 	if (err != DDI_SUCCESS) {
1125 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1126 		    "init data dma memory");
1127 		goto fail;
1128 	}
1129 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1130 
1131 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1132 fail:
1133 	return (err);
1134 }
1135 
1136 static void
1137 iwk_free_fw_dma(iwk_sc_t *sc)
1138 {
1139 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1140 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1141 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1142 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1143 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1144 }
1145 
1146 /*
1147  * Allocate a shared page between host and NIC.
1148  */
1149 static int
1150 iwk_alloc_shared(iwk_sc_t *sc)
1151 {
1152 	iwk_dma_t *dma_p;
1153 	int err = DDI_SUCCESS;
1154 
1155 	/* must be aligned on a 4K-page boundary */
1156 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1157 	    &sh_dma_attr, &iwk_dma_accattr,
1158 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1159 	    &sc->sc_dma_sh);
1160 	if (err != DDI_SUCCESS)
1161 		goto fail;
1162 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1163 
1164 	dma_p = &sc->sc_dma_sh;
1165 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1166 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1167 	    dma_p->cookie.dmac_size));
1168 
1169 	return (err);
1170 fail:
1171 	iwk_free_shared(sc);
1172 	return (err);
1173 }
1174 
1175 static void
1176 iwk_free_shared(iwk_sc_t *sc)
1177 {
1178 	iwk_free_dma_mem(&sc->sc_dma_sh);
1179 }
1180 
1181 /*
1182  * Allocate a keep warm page.
1183  */
1184 static int
1185 iwk_alloc_kw(iwk_sc_t *sc)
1186 {
1187 	iwk_dma_t *dma_p;
1188 	int err = DDI_SUCCESS;
1189 
1190 	/* must be aligned on a 4K-page boundary */
1191 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1192 	    &kw_dma_attr, &iwk_dma_accattr,
1193 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1194 	    &sc->sc_dma_kw);
1195 	if (err != DDI_SUCCESS)
1196 		goto fail;
1197 
1198 	dma_p = &sc->sc_dma_kw;
1199 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1200 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1201 	    dma_p->cookie.dmac_size));
1202 
1203 	return (err);
1204 fail:
1205 	iwk_free_kw(sc);
1206 	return (err);
1207 }
1208 
1209 static void
1210 iwk_free_kw(iwk_sc_t *sc)
1211 {
1212 	iwk_free_dma_mem(&sc->sc_dma_kw);
1213 }
1214 
1215 static int
1216 iwk_alloc_rx_ring(iwk_sc_t *sc)
1217 {
1218 	iwk_rx_ring_t *ring;
1219 	iwk_rx_data_t *data;
1220 	iwk_dma_t *dma_p;
1221 	int i, err = DDI_SUCCESS;
1222 
1223 	ring = &sc->sc_rxq;
1224 	ring->cur = 0;
1225 
1226 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1227 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1228 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1229 	    &ring->dma_desc);
1230 	if (err != DDI_SUCCESS) {
1231 		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1232 		goto fail;
1233 	}
1234 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1235 	dma_p = &ring->dma_desc;
1236 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1237 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1238 	    dma_p->cookie.dmac_size));
1239 
1240 	/*
1241 	 * Allocate Rx buffers.
1242 	 */
1243 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1244 		data = &ring->data[i];
1245 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1246 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1247 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1248 		    &data->dma_data);
1249 		if (err != DDI_SUCCESS) {
1250 			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1251 			    "failed\n", i);
1252 			goto fail;
1253 		}
1254 		/*
1255 		 * the physical address bit [8-36] are used,
1256 		 * instead of bit [0-31] in 3945.
1257 		 */
1258 		ring->desc[i] = LE_32((uint32_t)
1259 		    (data->dma_data.cookie.dmac_address >> 8));
1260 	}
1261 	dma_p = &ring->data[0].dma_data;
1262 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1263 	    "size:%lx]\n",
1264 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1265 	    dma_p->cookie.dmac_size));
1266 
1267 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1268 
1269 	return (err);
1270 
1271 fail:
1272 	iwk_free_rx_ring(sc);
1273 	return (err);
1274 }
1275 
1276 static void
1277 iwk_reset_rx_ring(iwk_sc_t *sc)
1278 {
1279 	int n;
1280 
1281 	iwk_mac_access_enter(sc);
1282 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1283 	for (n = 0; n < 2000; n++) {
1284 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1285 			break;
1286 		DELAY(1000);
1287 	}
1288 
1289 	if (n == 2000)
1290 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1291 
1292 	iwk_mac_access_exit(sc);
1293 
1294 	sc->sc_rxq.cur = 0;
1295 }
1296 
1297 static void
1298 iwk_free_rx_ring(iwk_sc_t *sc)
1299 {
1300 	int i;
1301 
1302 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1303 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1304 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1305 			    DDI_DMA_SYNC_FORCPU);
1306 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1307 	}
1308 
1309 	if (sc->sc_rxq.dma_desc.dma_hdl)
1310 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1311 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1312 }
1313 
1314 static int
1315 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1316     int slots, int qid)
1317 {
1318 	iwk_tx_data_t *data;
1319 	iwk_tx_desc_t *desc_h;
1320 	uint32_t paddr_desc_h;
1321 	iwk_cmd_t *cmd_h;
1322 	uint32_t paddr_cmd_h;
1323 	iwk_dma_t *dma_p;
1324 	int i, err = DDI_SUCCESS;
1325 
1326 	ring->qid = qid;
1327 	ring->count = TFD_QUEUE_SIZE_MAX;
1328 	ring->window = slots;
1329 	ring->queued = 0;
1330 	ring->cur = 0;
1331 
1332 	err = iwk_alloc_dma_mem(sc,
1333 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1334 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1335 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1336 	    &ring->dma_desc);
1337 	if (err != DDI_SUCCESS) {
1338 		cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1339 		    "failed\n", qid);
1340 		goto fail;
1341 	}
1342 	dma_p = &ring->dma_desc;
1343 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1344 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1345 	    dma_p->cookie.dmac_size));
1346 
1347 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1348 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1349 
1350 	err = iwk_alloc_dma_mem(sc,
1351 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1352 	    &cmd_dma_attr, &iwk_dma_accattr,
1353 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1354 	    &ring->dma_cmd);
1355 	if (err != DDI_SUCCESS) {
1356 		cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1357 		    "failed\n", qid);
1358 		goto fail;
1359 	}
1360 	dma_p = &ring->dma_cmd;
1361 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1362 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1363 	    dma_p->cookie.dmac_size));
1364 
1365 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1366 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1367 
1368 	/*
1369 	 * Allocate Tx buffers.
1370 	 */
1371 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1372 	    KM_NOSLEEP);
1373 	if (ring->data == NULL) {
1374 		cmn_err(CE_WARN, "could not allocate tx data slots\n");
1375 		goto fail;
1376 	}
1377 
1378 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1379 		data = &ring->data[i];
1380 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1381 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1382 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1383 		    &data->dma_data);
1384 		if (err != DDI_SUCCESS) {
1385 			cmn_err(CE_WARN, "dma alloc tx ring "
1386 			    "buf[%d] failed\n", i);
1387 			goto fail;
1388 		}
1389 
1390 		data->desc = desc_h + i;
1391 		data->paddr_desc = paddr_desc_h +
1392 		    _PTRDIFF(data->desc, desc_h);
1393 		data->cmd = cmd_h +  i; /* (i % slots); */
1394 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1395 		data->paddr_cmd = paddr_cmd_h +
1396 		    _PTRDIFF(data->cmd, cmd_h);
1397 	}
1398 	dma_p = &ring->data[0].dma_data;
1399 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1400 	    "size:%lx]\n",
1401 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1402 	    dma_p->cookie.dmac_size));
1403 
1404 	return (err);
1405 
1406 fail:
1407 	if (ring->data)
1408 		kmem_free(ring->data,
1409 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1410 	iwk_free_tx_ring(sc, ring);
1411 	return (err);
1412 }
1413 
1414 static void
1415 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1416 {
1417 	iwk_tx_data_t *data;
1418 	int i, n;
1419 
1420 	iwk_mac_access_enter(sc);
1421 
1422 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1423 	for (n = 0; n < 200; n++) {
1424 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1425 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1426 			break;
1427 		DELAY(10);
1428 	}
1429 	if (n == 200) {
1430 		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1431 		    ring->qid));
1432 	}
1433 	iwk_mac_access_exit(sc);
1434 
1435 	for (i = 0; i < ring->count; i++) {
1436 		data = &ring->data[i];
1437 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1438 	}
1439 
1440 	ring->queued = 0;
1441 	ring->cur = 0;
1442 }
1443 
1444 /*ARGSUSED*/
1445 static void
1446 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1447 {
1448 	int i;
1449 
1450 	if (ring->dma_desc.dma_hdl != NULL)
1451 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1452 	iwk_free_dma_mem(&ring->dma_desc);
1453 
1454 	if (ring->dma_cmd.dma_hdl != NULL)
1455 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1456 	iwk_free_dma_mem(&ring->dma_cmd);
1457 
1458 	if (ring->data != NULL) {
1459 		for (i = 0; i < ring->count; i++) {
1460 			if (ring->data[i].dma_data.dma_hdl)
1461 				IWK_DMA_SYNC(ring->data[i].dma_data,
1462 				    DDI_DMA_SYNC_FORDEV);
1463 			iwk_free_dma_mem(&ring->data[i].dma_data);
1464 		}
1465 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1466 	}
1467 }
1468 
1469 static int
1470 iwk_ring_init(iwk_sc_t *sc)
1471 {
1472 	int i, err = DDI_SUCCESS;
1473 
1474 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1475 		if (i == IWK_CMD_QUEUE_NUM)
1476 			continue;
1477 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1478 		    i);
1479 		if (err != DDI_SUCCESS)
1480 			goto fail;
1481 	}
1482 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1483 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1484 	if (err != DDI_SUCCESS)
1485 		goto fail;
1486 	err = iwk_alloc_rx_ring(sc);
1487 	if (err != DDI_SUCCESS)
1488 		goto fail;
1489 	return (err);
1490 
1491 fail:
1492 	return (err);
1493 }
1494 
1495 static void
1496 iwk_ring_free(iwk_sc_t *sc)
1497 {
1498 	int i = IWK_NUM_QUEUES;
1499 
1500 	iwk_free_rx_ring(sc);
1501 	while (--i >= 0) {
1502 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1503 	}
1504 }
1505 
1506 /* ARGSUSED */
1507 static ieee80211_node_t *
1508 iwk_node_alloc(ieee80211com_t *ic)
1509 {
1510 	iwk_amrr_t *amrr;
1511 
1512 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1513 	if (amrr != NULL)
1514 		iwk_amrr_init(amrr);
1515 	return (&amrr->in);
1516 }
1517 
1518 static void
1519 iwk_node_free(ieee80211_node_t *in)
1520 {
1521 	ieee80211com_t *ic = in->in_ic;
1522 
1523 	ic->ic_node_cleanup(in);
1524 	if (in->in_wpa_ie != NULL)
1525 		ieee80211_free(in->in_wpa_ie);
1526 	kmem_free(in, sizeof (iwk_amrr_t));
1527 }
1528 
1529 /*ARGSUSED*/
1530 static int
1531 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1532 {
1533 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1534 	ieee80211_node_t *in = ic->ic_bss;
1535 	enum ieee80211_state ostate = ic->ic_state;
1536 	int i, err = IWK_SUCCESS;
1537 
1538 	mutex_enter(&sc->sc_glock);
1539 	switch (nstate) {
1540 	case IEEE80211_S_SCAN:
1541 		switch (ostate) {
1542 		case IEEE80211_S_INIT:
1543 		{
1544 			iwk_add_sta_t node;
1545 
1546 			sc->sc_flags |= IWK_F_SCANNING;
1547 			sc->sc_scan_pending = 0;
1548 			iwk_set_led(sc, 2, 10, 2);
1549 
1550 			/*
1551 			 * clear association to receive beacons from
1552 			 * all BSS'es
1553 			 */
1554 			sc->sc_config.assoc_id = 0;
1555 			sc->sc_config.filter_flags &=
1556 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1557 
1558 			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1559 			    "flags %x filter_flags %x\n", sc->sc_config.chan,
1560 			    sc->sc_config.flags, sc->sc_config.filter_flags));
1561 
1562 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1563 			    sizeof (iwk_rxon_cmd_t), 1);
1564 			if (err != IWK_SUCCESS) {
1565 				cmn_err(CE_WARN,
1566 				    "could not clear association\n");
1567 				sc->sc_flags &= ~IWK_F_SCANNING;
1568 				mutex_exit(&sc->sc_glock);
1569 				return (err);
1570 			}
1571 
1572 			/* add broadcast node to send probe request */
1573 			(void) memset(&node, 0, sizeof (node));
1574 			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1575 			node.id = IWK_BROADCAST_ID;
1576 			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1577 			    sizeof (node), 1);
1578 			if (err != IWK_SUCCESS) {
1579 				cmn_err(CE_WARN, "could not add "
1580 				    "broadcast node\n");
1581 				sc->sc_flags &= ~IWK_F_SCANNING;
1582 				mutex_exit(&sc->sc_glock);
1583 				return (err);
1584 			}
1585 			break;
1586 		}
1587 
1588 		case IEEE80211_S_AUTH:
1589 		case IEEE80211_S_ASSOC:
1590 		case IEEE80211_S_RUN:
1591 			sc->sc_flags |= IWK_F_SCANNING;
1592 			sc->sc_scan_pending = 0;
1593 
1594 			iwk_set_led(sc, 2, 10, 2);
1595 			/* FALLTHRU */
1596 		case IEEE80211_S_SCAN:
1597 			mutex_exit(&sc->sc_glock);
1598 			/* step to next channel before actual FW scan */
1599 			err = sc->sc_newstate(ic, nstate, arg);
1600 			mutex_enter(&sc->sc_glock);
1601 			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1602 				cmn_err(CE_WARN,
1603 				    "could not initiate scan\n");
1604 				sc->sc_flags &= ~IWK_F_SCANNING;
1605 				ieee80211_cancel_scan(ic);
1606 			}
1607 			mutex_exit(&sc->sc_glock);
1608 			return (err);
1609 		default:
1610 			break;
1611 
1612 		}
1613 		sc->sc_clk = 0;
1614 		break;
1615 
1616 	case IEEE80211_S_AUTH:
1617 		if (ostate == IEEE80211_S_SCAN) {
1618 			sc->sc_flags &= ~IWK_F_SCANNING;
1619 		}
1620 
1621 		/* reset state to handle reassociations correctly */
1622 		sc->sc_config.assoc_id = 0;
1623 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1624 
1625 		/*
1626 		 * before sending authentication and association request frame,
1627 		 * we need do something in the hardware, such as setting the
1628 		 * channel same to the target AP...
1629 		 */
1630 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1631 			cmn_err(CE_WARN, "could not setup firmware for "
1632 			    "authentication\n");
1633 			mutex_exit(&sc->sc_glock);
1634 			return (err);
1635 		}
1636 		break;
1637 
1638 	case IEEE80211_S_RUN:
1639 		if (ostate == IEEE80211_S_SCAN) {
1640 			sc->sc_flags &= ~IWK_F_SCANNING;
1641 		}
1642 
1643 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1644 			/* let LED blink when monitoring */
1645 			iwk_set_led(sc, 2, 10, 10);
1646 			break;
1647 		}
1648 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1649 
1650 		/* IBSS mode */
1651 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1652 			/*
1653 			 * clean all nodes in ibss node table
1654 			 * in order to be consistent with hardware
1655 			 */
1656 			err = iwk_run_state_config_ibss(ic);
1657 			if (err != IWK_SUCCESS) {
1658 				cmn_err(CE_WARN, "iwk_newstate(): "
1659 				    "failed to update configuration "
1660 				    "in IBSS mode\n");
1661 				mutex_exit(&sc->sc_glock);
1662 				return (err);
1663 			}
1664 		}
1665 
1666 		/* none IBSS mode */
1667 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1668 			/* update adapter's configuration */
1669 			err = iwk_run_state_config_sta(ic);
1670 			if (err != IWK_SUCCESS) {
1671 				cmn_err(CE_WARN, "iwk_newstate(): "
1672 				    "failed to update configuration "
1673 				    "in none IBSS mode\n");
1674 				mutex_exit(&sc->sc_glock);
1675 				return (err);
1676 			}
1677 		}
1678 
1679 		/* obtain current temperature of chipset */
1680 		sc->sc_tempera = iwk_curr_tempera(sc);
1681 
1682 		/*
1683 		 * make Tx power calibration to determine
1684 		 * the gains of DSP and radio
1685 		 */
1686 		err = iwk_tx_power_calibration(sc);
1687 		if (err) {
1688 			cmn_err(CE_WARN, "iwk_newstate(): "
1689 			    "failed to set tx power table\n");
1690 			mutex_exit(&sc->sc_glock);
1691 			return (err);
1692 		}
1693 
1694 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
1695 
1696 			/*
1697 			 * allocate and transmit beacon frames
1698 			 */
1699 			err = iwk_start_tx_beacon(ic);
1700 			if (err != IWK_SUCCESS) {
1701 				cmn_err(CE_WARN, "iwk_newstate(): "
1702 				    "can't transmit beacon frames\n");
1703 				mutex_exit(&sc->sc_glock);
1704 				return (err);
1705 			}
1706 		}
1707 
1708 		/* start automatic rate control */
1709 		mutex_enter(&sc->sc_mt_lock);
1710 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1711 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1712 			/* set rate to some reasonable initial value */
1713 			i = in->in_rates.ir_nrates - 1;
1714 			while (i > 0 && IEEE80211_RATE(i) > 72)
1715 				i--;
1716 			in->in_txrate = i;
1717 		} else {
1718 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1719 		}
1720 		mutex_exit(&sc->sc_mt_lock);
1721 
1722 		/* set LED on after associated */
1723 		iwk_set_led(sc, 2, 0, 1);
1724 		break;
1725 
1726 	case IEEE80211_S_INIT:
1727 		if (ostate == IEEE80211_S_SCAN) {
1728 			sc->sc_flags &= ~IWK_F_SCANNING;
1729 		}
1730 
1731 		/* set LED off after init */
1732 		iwk_set_led(sc, 2, 1, 0);
1733 		break;
1734 	case IEEE80211_S_ASSOC:
1735 		if (ostate == IEEE80211_S_SCAN) {
1736 			sc->sc_flags &= ~IWK_F_SCANNING;
1737 		}
1738 
1739 		break;
1740 	}
1741 
1742 	mutex_exit(&sc->sc_glock);
1743 
1744 	err = sc->sc_newstate(ic, nstate, arg);
1745 
1746 	if (nstate == IEEE80211_S_RUN) {
1747 
1748 		mutex_enter(&sc->sc_glock);
1749 
1750 		/*
1751 		 * make initialization for Receiver
1752 		 * sensitivity calibration
1753 		 */
1754 		err = iwk_rx_sens_init(sc);
1755 		if (err) {
1756 			cmn_err(CE_WARN, "iwk_newstate(): "
1757 			    "failed to init RX sensitivity\n");
1758 			mutex_exit(&sc->sc_glock);
1759 			return (err);
1760 		}
1761 
1762 		/* make initialization for Receiver gain balance */
1763 		err = iwk_rxgain_diff_init(sc);
1764 		if (err) {
1765 			cmn_err(CE_WARN, "iwk_newstate(): "
1766 			    "failed to init phy calibration\n");
1767 			mutex_exit(&sc->sc_glock);
1768 			return (err);
1769 		}
1770 
1771 		mutex_exit(&sc->sc_glock);
1772 
1773 	}
1774 
1775 	return (err);
1776 }
1777 
1778 static void
1779 iwk_watchdog(void *arg)
1780 {
1781 	iwk_sc_t *sc = arg;
1782 	struct ieee80211com *ic = &sc->sc_ic;
1783 #ifdef DEBUG
1784 	timeout_id_t timeout_id = ic->ic_watchdog_timer;
1785 #endif
1786 
1787 	ieee80211_stop_watchdog(ic);
1788 
1789 	if ((ic->ic_state != IEEE80211_S_AUTH) &&
1790 	    (ic->ic_state != IEEE80211_S_ASSOC))
1791 		return;
1792 
1793 	if (ic->ic_bss->in_fails > 0) {
1794 		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) reset: "
1795 		    "node (0x%x)\n", timeout_id, &ic->ic_bss));
1796 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1797 	} else {
1798 		IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) timeout: "
1799 		    "node (0x%x), retry (%d)\n",
1800 		    timeout_id, &ic->ic_bss, ic->ic_bss->in_fails + 1));
1801 		ieee80211_watchdog(ic);
1802 	}
1803 }
1804 
1805 /*ARGSUSED*/
1806 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1807     const uint8_t mac[IEEE80211_ADDR_LEN])
1808 {
1809 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1810 	iwk_add_sta_t node;
1811 	int err;
1812 	uint8_t index1;
1813 
1814 	switch (k->wk_cipher->ic_cipher) {
1815 	case IEEE80211_CIPHER_WEP:
1816 	case IEEE80211_CIPHER_TKIP:
1817 		return (1); /* sofeware do it. */
1818 	case IEEE80211_CIPHER_AES_CCM:
1819 		break;
1820 	default:
1821 		return (0);
1822 	}
1823 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1824 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1825 
1826 	mutex_enter(&sc->sc_glock);
1827 
1828 	/* update ap/multicast node */
1829 	(void) memset(&node, 0, sizeof (node));
1830 	if (IEEE80211_IS_MULTICAST(mac)) {
1831 		(void) memset(node.bssid, 0xff, 6);
1832 		node.id = IWK_BROADCAST_ID;
1833 	} else if (ic->ic_opmode == IEEE80211_M_IBSS) {
1834 		mutex_exit(&sc->sc_glock);
1835 		mutex_enter(&sc->sc_ibss.node_tb_lock);
1836 
1837 		/*
1838 		 * search for node in ibss node table
1839 		 */
1840 		for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
1841 		    index1++) {
1842 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
1843 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
1844 			    ibss_node_tb[index1].node.bssid,
1845 			    mac)) {
1846 				break;
1847 			}
1848 		}
1849 		if (index1 >= IWK_BROADCAST_ID) {
1850 			cmn_err(CE_WARN, "iwk_key_set(): "
1851 			    "have no this node in hardware node table\n");
1852 			mutex_exit(&sc->sc_ibss.node_tb_lock);
1853 			return (0);
1854 		} else {
1855 			/*
1856 			 * configure key for given node in hardware
1857 			 */
1858 			if (k->wk_flags & IEEE80211_KEY_XMIT) {
1859 				sc->sc_ibss.ibss_node_tb[index1].
1860 				    node.key_flags = 0;
1861 				sc->sc_ibss.ibss_node_tb[index1].
1862 				    node.keyp = k->wk_keyix;
1863 			} else {
1864 				sc->sc_ibss.ibss_node_tb[index1].
1865 				    node.key_flags = (1 << 14);
1866 				sc->sc_ibss.ibss_node_tb[index1].
1867 				    node.keyp = k->wk_keyix + 4;
1868 			}
1869 
1870 			(void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
1871 			    k->wk_key, k->wk_keylen);
1872 			sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
1873 			    (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1874 			sc->sc_ibss.ibss_node_tb[index1].node.sta_mask =
1875 			    STA_MODIFY_KEY_MASK;
1876 			sc->sc_ibss.ibss_node_tb[index1].node.control = 1;
1877 
1878 			mutex_enter(&sc->sc_glock);
1879 			err = iwk_cmd(sc, REPLY_ADD_STA,
1880 			    &sc->sc_ibss.ibss_node_tb[index1].node,
1881 			    sizeof (iwk_add_sta_t), 1);
1882 			if (err != IWK_SUCCESS) {
1883 				cmn_err(CE_WARN, "iwk_key_set(): "
1884 				    "failed to update IBSS node in hardware\n");
1885 				mutex_exit(&sc->sc_glock);
1886 				mutex_exit(&sc->sc_ibss.node_tb_lock);
1887 				return (0);
1888 			}
1889 			mutex_exit(&sc->sc_glock);
1890 		}
1891 		mutex_exit(&sc->sc_ibss.node_tb_lock);
1892 		return (1);
1893 	} else {
1894 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1895 		node.id = IWK_AP_ID;
1896 	}
1897 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1898 		node.key_flags = 0;
1899 		node.keyp = k->wk_keyix;
1900 	} else {
1901 		node.key_flags = (1 << 14);
1902 		node.keyp = k->wk_keyix + 4;
1903 	}
1904 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1905 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1906 	node.sta_mask = STA_MODIFY_KEY_MASK;
1907 	node.control = 1;
1908 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1909 	if (err != IWK_SUCCESS) {
1910 		cmn_err(CE_WARN, "iwk_key_set():"
1911 		    "failed to update ap node\n");
1912 		mutex_exit(&sc->sc_glock);
1913 		return (0);
1914 	}
1915 	mutex_exit(&sc->sc_glock);
1916 	return (1);
1917 }
1918 
1919 /*
1920  * exclusive access to mac begin.
1921  */
1922 static void
1923 iwk_mac_access_enter(iwk_sc_t *sc)
1924 {
1925 	uint32_t tmp;
1926 	int n;
1927 
1928 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1929 	IWK_WRITE(sc, CSR_GP_CNTRL,
1930 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1931 
1932 	/* wait until we succeed */
1933 	for (n = 0; n < 1000; n++) {
1934 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1935 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1936 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1937 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1938 			break;
1939 		DELAY(10);
1940 	}
1941 	if (n == 1000)
1942 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1943 }
1944 
1945 /*
1946  * exclusive access to mac end.
1947  */
1948 static void
1949 iwk_mac_access_exit(iwk_sc_t *sc)
1950 {
1951 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1952 	IWK_WRITE(sc, CSR_GP_CNTRL,
1953 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1954 }
1955 
1956 static uint32_t
1957 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1958 {
1959 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1960 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1961 }
1962 
1963 static void
1964 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1965 {
1966 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1967 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1968 }
1969 
1970 static uint32_t
1971 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1972 {
1973 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1974 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1975 }
1976 
1977 static void
1978 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1979 {
1980 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1981 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1982 }
1983 
1984 static void
1985 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1986     uint32_t *data, int wlen)
1987 {
1988 	for (; wlen > 0; wlen--, data++, addr += 4)
1989 		iwk_reg_write(sc, addr, *data);
1990 }
1991 
1992 
1993 /*
1994  * ucode load/initialization steps:
1995  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1996  * BSM contains a small memory that *always* stays powered up, so it can
1997  * retain the bootstrap program even when the card is in a power-saving
1998  * power-down state.  The BSM loads the small program into ARC processor's
1999  * instruction memory when triggered by power-up.
2000  * 2)  load Initialize image via bootstrap program.
2001  * The Initialize image sets up regulatory and calibration data for the
2002  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
2003  * The 4965 reply contains calibration data for temperature, voltage and tx gain
2004  * correction.
2005  */
2006 static int
2007 iwk_load_firmware(iwk_sc_t *sc)
2008 {
2009 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
2010 	uint32_t size = sc->sc_hdr->bootsz;
2011 	int n, err = IWK_SUCCESS;
2012 
2013 	/*
2014 	 * The physical address bit [4-35] of the initialize uCode.
2015 	 * In the initialize alive notify interrupt the physical address of
2016 	 * the runtime ucode will be set for loading.
2017 	 */
2018 	iwk_mac_access_enter(sc);
2019 
2020 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2021 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
2022 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2023 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
2024 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2025 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
2026 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2027 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
2028 
2029 	/* load bootstrap code into BSM memory */
2030 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
2031 	    size / sizeof (uint32_t));
2032 
2033 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
2034 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
2035 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
2036 
2037 	/*
2038 	 * prepare to load initialize uCode
2039 	 */
2040 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2041 
2042 	/* wait while the adapter is busy loading the firmware */
2043 	for (n = 0; n < 1000; n++) {
2044 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
2045 		    BSM_WR_CTRL_REG_BIT_START))
2046 			break;
2047 		DELAY(10);
2048 	}
2049 	if (n == 1000) {
2050 		cmn_err(CE_WARN, "timeout transferring firmware\n");
2051 		err = ETIMEDOUT;
2052 		return (err);
2053 	}
2054 
2055 	/* for future power-save mode use */
2056 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2057 
2058 	iwk_mac_access_exit(sc);
2059 
2060 	return (err);
2061 }
2062 
2063 /*ARGSUSED*/
2064 static void
2065 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2066 {
2067 	ieee80211com_t *ic = &sc->sc_ic;
2068 	iwk_rx_ring_t *ring = &sc->sc_rxq;
2069 	iwk_rx_phy_res_t *stat;
2070 	ieee80211_node_t *in;
2071 	uint32_t *tail;
2072 	struct ieee80211_frame *wh;
2073 	mblk_t *mp;
2074 	uint16_t len, rssi, mrssi, agc;
2075 	int16_t t;
2076 	uint32_t ants, i;
2077 	struct iwk_rx_non_cfg_phy *phyinfo;
2078 
2079 	/* assuming not 11n here. cope with 11n in phase-II */
2080 	stat = (iwk_rx_phy_res_t *)(desc + 1);
2081 	if (stat->cfg_phy_cnt > 20) {
2082 		return;
2083 	}
2084 
2085 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
2086 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
2087 	mrssi = 0;
2088 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
2089 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
2090 	for (i = 0; i < 3; i++) {
2091 		if (ants & (1 << i))
2092 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
2093 	}
2094 	t = mrssi - agc - 44; /* t is the dBM value */
2095 	/*
2096 	 * convert dBm to percentage ???
2097 	 */
2098 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
2099 	    (75 * 75);
2100 	if (rssi > 100)
2101 		rssi = 100;
2102 	if (rssi < 1)
2103 		rssi = 1;
2104 	len = stat->byte_count;
2105 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
2106 
2107 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
2108 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2109 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2110 	    len, stat->rate.r.s.rate, stat->channel,
2111 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2112 	    stat->cfg_phy_cnt, LE_32(*tail)));
2113 
2114 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2115 		IWK_DBG((IWK_DEBUG_RX, "rx frame oversize\n"));
2116 		return;
2117 	}
2118 
2119 	/*
2120 	 * discard Rx frames with bad CRC
2121 	 */
2122 	if ((LE_32(*tail) &
2123 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2124 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2125 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2126 		    LE_32(*tail)));
2127 		sc->sc_rx_err++;
2128 		return;
2129 	}
2130 
2131 	wh = (struct ieee80211_frame *)
2132 	    ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
2133 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2134 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2135 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2136 		    sc->sc_assoc_id));
2137 	}
2138 #ifdef DEBUG
2139 	if (iwk_dbg_flags & IWK_DEBUG_RX)
2140 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2141 #endif
2142 	in = ieee80211_find_rxnode(ic, wh);
2143 	mp = allocb(len, BPRI_MED);
2144 	if (mp) {
2145 		(void) memcpy(mp->b_wptr, wh, len);
2146 		mp->b_wptr += len;
2147 
2148 		/* send the frame to the 802.11 layer */
2149 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2150 	} else {
2151 		sc->sc_rx_nobuf++;
2152 		IWK_DBG((IWK_DEBUG_RX,
2153 		    "iwk_rx_intr(): alloc rx buf failed\n"));
2154 	}
2155 	/* release node reference */
2156 	ieee80211_free_node(in);
2157 }
2158 
2159 /*ARGSUSED*/
2160 static void
2161 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2162 {
2163 	ieee80211com_t *ic = &sc->sc_ic;
2164 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2165 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2166 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2167 
2168 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2169 	    " retries=%d frame_count=%x nkill=%d "
2170 	    "rate=%x duration=%d status=%x\n",
2171 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2172 	    stat->bt_kill_count, stat->rate.r.s.rate,
2173 	    LE_32(stat->duration), LE_32(stat->status)));
2174 
2175 	amrr->txcnt++;
2176 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2177 	if (stat->ntries > 0) {
2178 		amrr->retrycnt++;
2179 		sc->sc_tx_retries++;
2180 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2181 		    sc->sc_tx_retries));
2182 	}
2183 
2184 	sc->sc_tx_timer = 0;
2185 
2186 	mutex_enter(&sc->sc_tx_lock);
2187 	ring->queued--;
2188 	if (ring->queued < 0)
2189 		ring->queued = 0;
2190 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2191 		sc->sc_need_reschedule = 0;
2192 		mutex_exit(&sc->sc_tx_lock);
2193 		mac_tx_update(ic->ic_mach);
2194 		mutex_enter(&sc->sc_tx_lock);
2195 	}
2196 	mutex_exit(&sc->sc_tx_lock);
2197 }
2198 
2199 static void
2200 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2201 {
2202 	if ((desc->hdr.qid & 7) != 4) {
2203 		return;
2204 	}
2205 	mutex_enter(&sc->sc_glock);
2206 	sc->sc_flags |= IWK_F_CMD_DONE;
2207 	cv_signal(&sc->sc_cmd_cv);
2208 	mutex_exit(&sc->sc_glock);
2209 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2210 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2211 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2212 	    desc->hdr.type));
2213 }
2214 
2215 static void
2216 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2217 {
2218 	uint32_t base, i;
2219 	struct iwk_alive_resp *ar =
2220 	    (struct iwk_alive_resp *)(desc + 1);
2221 
2222 	/* the microcontroller is ready */
2223 	IWK_DBG((IWK_DEBUG_FW,
2224 	    "microcode alive notification minor: %x major: %x type:"
2225 	    " %x subtype: %x\n",
2226 	    ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2227 
2228 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2229 		IWK_DBG((IWK_DEBUG_FW,
2230 		    "microcontroller initialization failed\n"));
2231 	}
2232 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2233 		IWK_DBG((IWK_DEBUG_FW,
2234 		    "initialization alive received.\n"));
2235 		(void) memcpy(&sc->sc_card_alive_init, ar,
2236 		    sizeof (struct iwk_init_alive_resp));
2237 		/* XXX get temperature */
2238 		iwk_mac_access_enter(sc);
2239 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2240 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2241 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2242 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2243 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2244 		    sc->sc_dma_fw_data.cookie.dmac_size);
2245 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2246 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2247 		iwk_mac_access_exit(sc);
2248 	} else {
2249 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2250 		(void) memcpy(&sc->sc_card_alive_run, ar,
2251 		    sizeof (struct iwk_alive_resp));
2252 
2253 		/*
2254 		 * Init SCD related registers to make Tx work. XXX
2255 		 */
2256 		iwk_mac_access_enter(sc);
2257 
2258 		/* read sram address of data base */
2259 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2260 
2261 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2262 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2263 		    i < 128; i += 4)
2264 			iwk_mem_write(sc, base + i, 0);
2265 
2266 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2267 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2268 		    i < 256; i += 4)
2269 			iwk_mem_write(sc, base + i, 0);
2270 
2271 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2272 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2273 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2274 			iwk_mem_write(sc, base + i, 0);
2275 
2276 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2277 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2278 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2279 
2280 		/* initiate the tx queues */
2281 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
2282 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2283 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2284 			iwk_mem_write(sc, sc->sc_scd_base +
2285 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2286 			    (SCD_WIN_SIZE & 0x7f));
2287 			iwk_mem_write(sc, sc->sc_scd_base +
2288 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2289 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2290 		}
2291 		/* interrupt enable on each queue0-7 */
2292 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2293 		    (1 << IWK_NUM_QUEUES) - 1);
2294 		/* enable  each channel 0-7 */
2295 		iwk_reg_write(sc, SCD_TXFACT,
2296 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2297 		/*
2298 		 * queue 0-7 maps to FIFO 0-7 and
2299 		 * all queues work under FIFO mode (none-scheduler-ack)
2300 		 */
2301 		for (i = 0; i < 7; i++) {
2302 			iwk_reg_write(sc,
2303 			    SCD_QUEUE_STATUS_BITS(i),
2304 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2305 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2306 			    SCD_QUEUE_STTS_REG_MSK);
2307 		}
2308 		iwk_mac_access_exit(sc);
2309 
2310 		sc->sc_flags |= IWK_F_FW_INIT;
2311 		cv_signal(&sc->sc_fw_cv);
2312 	}
2313 
2314 }
2315 
2316 static uint_t
2317 /* LINTED: argument unused in function: unused */
2318 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2319 {
2320 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2321 	ieee80211com_t *ic = &sc->sc_ic;
2322 	iwk_rx_desc_t *desc;
2323 	iwk_rx_data_t *data;
2324 	uint32_t index;
2325 
2326 	mutex_enter(&sc->sc_glock);
2327 	if (sc->sc_rx_softint_pending != 1) {
2328 		mutex_exit(&sc->sc_glock);
2329 		return (DDI_INTR_UNCLAIMED);
2330 	}
2331 	/* disable interrupts */
2332 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2333 	mutex_exit(&sc->sc_glock);
2334 
2335 	/*
2336 	 * firmware has moved the index of the rx queue, driver get it,
2337 	 * and deal with it.
2338 	 */
2339 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2340 
2341 	while (sc->sc_rxq.cur != index) {
2342 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2343 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2344 
2345 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2346 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2347 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2348 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2349 
2350 		/* a command other than a tx need to be replied */
2351 		if (!(desc->hdr.qid & 0x80) &&
2352 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2353 		    (desc->hdr.type != REPLY_TX) &&
2354 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2355 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2356 		    (desc->hdr.type != SENSITIVITY_CMD))
2357 			iwk_cmd_intr(sc, desc);
2358 
2359 		switch (desc->hdr.type) {
2360 		case REPLY_4965_RX:
2361 			iwk_rx_intr(sc, desc, data);
2362 			break;
2363 
2364 		case REPLY_TX:
2365 			iwk_tx_intr(sc, desc, data);
2366 			break;
2367 
2368 		case REPLY_ALIVE:
2369 			iwk_ucode_alive(sc, desc);
2370 			break;
2371 
2372 		case CARD_STATE_NOTIFICATION:
2373 		{
2374 			uint32_t *status = (uint32_t *)(desc + 1);
2375 
2376 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2377 			    LE_32(*status)));
2378 
2379 			if (LE_32(*status) & 1) {
2380 				/*
2381 				 * the radio button has to be pushed(OFF). It
2382 				 * is considered as a hw error, the
2383 				 * iwk_thread() tries to recover it after the
2384 				 * button is pushed again(ON)
2385 				 */
2386 				cmn_err(CE_NOTE,
2387 				    "iwk_rx_softintr(): "
2388 				    "Radio transmitter is off\n");
2389 				sc->sc_ostate = sc->sc_ic.ic_state;
2390 				ieee80211_new_state(&sc->sc_ic,
2391 				    IEEE80211_S_INIT, -1);
2392 				sc->sc_flags |=
2393 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2394 			}
2395 			break;
2396 		}
2397 		case SCAN_START_NOTIFICATION:
2398 		{
2399 			iwk_start_scan_t *scan =
2400 			    (iwk_start_scan_t *)(desc + 1);
2401 
2402 			IWK_DBG((IWK_DEBUG_SCAN,
2403 			    "scanning channel %d status %x\n",
2404 			    scan->chan, LE_32(scan->status)));
2405 
2406 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2407 			break;
2408 		}
2409 		case SCAN_COMPLETE_NOTIFICATION:
2410 		{
2411 			iwk_stop_scan_t *scan =
2412 			    (iwk_stop_scan_t *)(desc + 1);
2413 
2414 			IWK_DBG((IWK_DEBUG_SCAN,
2415 			    "completed channel %d (burst of %d) status %02x\n",
2416 			    scan->chan, scan->nchan, scan->status));
2417 
2418 			sc->sc_scan_pending++;
2419 			break;
2420 		}
2421 		case STATISTICS_NOTIFICATION:
2422 			/* handle statistics notification */
2423 			iwk_statistics_notify(sc, desc);
2424 			break;
2425 		}
2426 
2427 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2428 	}
2429 
2430 	/*
2431 	 * driver dealt with what reveived in rx queue and tell the information
2432 	 * to the firmware.
2433 	 */
2434 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2435 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2436 
2437 	mutex_enter(&sc->sc_glock);
2438 	/* re-enable interrupts */
2439 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2440 	sc->sc_rx_softint_pending = 0;
2441 	mutex_exit(&sc->sc_glock);
2442 
2443 	return (DDI_INTR_CLAIMED);
2444 }
2445 
2446 static uint_t
2447 /* LINTED: argument unused in function: unused */
2448 iwk_intr(caddr_t arg, caddr_t unused)
2449 {
2450 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2451 	uint32_t r, rfh;
2452 
2453 	mutex_enter(&sc->sc_glock);
2454 
2455 	if (sc->sc_flags & IWK_F_SUSPEND) {
2456 		mutex_exit(&sc->sc_glock);
2457 		return (DDI_INTR_UNCLAIMED);
2458 	}
2459 
2460 	r = IWK_READ(sc, CSR_INT);
2461 	if (r == 0 || r == 0xffffffff) {
2462 		mutex_exit(&sc->sc_glock);
2463 		return (DDI_INTR_UNCLAIMED);
2464 	}
2465 
2466 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2467 
2468 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2469 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2470 	/* disable interrupts */
2471 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2472 	/* ack interrupts */
2473 	IWK_WRITE(sc, CSR_INT, r);
2474 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2475 
2476 	if (sc->sc_soft_hdl == NULL) {
2477 		mutex_exit(&sc->sc_glock);
2478 		return (DDI_INTR_CLAIMED);
2479 	}
2480 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2481 		cmn_err(CE_WARN, "fatal firmware error\n");
2482 		mutex_exit(&sc->sc_glock);
2483 #ifdef DEBUG
2484 		/* dump event and error logs to dmesg */
2485 		iwk_write_error_log(sc);
2486 		iwk_write_event_log(sc);
2487 #endif /* DEBUG */
2488 		iwk_stop(sc);
2489 		sc->sc_ostate = sc->sc_ic.ic_state;
2490 		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2491 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2492 		return (DDI_INTR_CLAIMED);
2493 	}
2494 
2495 	if (r & BIT_INT_RF_KILL) {
2496 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2497 	}
2498 
2499 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2500 	    (rfh & FH_INT_RX_MASK)) {
2501 		sc->sc_rx_softint_pending = 1;
2502 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2503 	}
2504 
2505 	if (r & BIT_INT_ALIVE)	{
2506 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2507 	}
2508 
2509 	/* re-enable interrupts */
2510 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2511 	mutex_exit(&sc->sc_glock);
2512 
2513 	return (DDI_INTR_CLAIMED);
2514 }
2515 
2516 static uint8_t
2517 iwk_rate_to_plcp(int rate)
2518 {
2519 	uint8_t ret;
2520 
2521 	switch (rate) {
2522 	/* CCK rates */
2523 	case 2:
2524 		ret = 0xa;
2525 		break;
2526 	case 4:
2527 		ret = 0x14;
2528 		break;
2529 	case 11:
2530 		ret = 0x37;
2531 		break;
2532 	case 22:
2533 		ret = 0x6e;
2534 		break;
2535 	/* OFDM rates */
2536 	case 12:
2537 		ret = 0xd;
2538 		break;
2539 	case 18:
2540 		ret = 0xf;
2541 		break;
2542 	case 24:
2543 		ret = 0x5;
2544 		break;
2545 	case 36:
2546 		ret = 0x7;
2547 		break;
2548 	case 48:
2549 		ret = 0x9;
2550 		break;
2551 	case 72:
2552 		ret = 0xb;
2553 		break;
2554 	case 96:
2555 		ret = 0x1;
2556 		break;
2557 	case 108:
2558 		ret = 0x3;
2559 		break;
2560 	default:
2561 		ret = 0;
2562 		break;
2563 	}
2564 	return (ret);
2565 }
2566 
2567 static mblk_t *
2568 iwk_m_tx(void *arg, mblk_t *mp)
2569 {
2570 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2571 	ieee80211com_t	*ic = &sc->sc_ic;
2572 	mblk_t			*next;
2573 
2574 	if (sc->sc_flags & IWK_F_SUSPEND) {
2575 		freemsgchain(mp);
2576 		return (NULL);
2577 	}
2578 
2579 	if (ic->ic_state != IEEE80211_S_RUN) {
2580 		freemsgchain(mp);
2581 		return (NULL);
2582 	}
2583 
2584 	while (mp != NULL) {
2585 		next = mp->b_next;
2586 		mp->b_next = NULL;
2587 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2588 			mp->b_next = next;
2589 			break;
2590 		}
2591 		mp = next;
2592 	}
2593 	return (mp);
2594 }
2595 
2596 /* ARGSUSED */
2597 static int
2598 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2599 {
2600 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2601 	iwk_tx_ring_t *ring;
2602 	iwk_tx_desc_t *desc;
2603 	iwk_tx_data_t *data;
2604 	iwk_cmd_t *cmd;
2605 	iwk_tx_cmd_t *tx;
2606 	ieee80211_node_t *in;
2607 	struct ieee80211_frame *wh;
2608 	struct ieee80211_key *k = NULL;
2609 	mblk_t *m, *m0;
2610 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2611 	uint16_t masks = 0;
2612 	uint8_t index, index1, index2;
2613 
2614 	ring = &sc->sc_txq[0];
2615 	data = &ring->data[ring->cur];
2616 	desc = data->desc;
2617 	cmd = data->cmd;
2618 	bzero(desc, sizeof (*desc));
2619 	bzero(cmd, sizeof (*cmd));
2620 
2621 	mutex_enter(&sc->sc_tx_lock);
2622 	if (sc->sc_flags & IWK_F_SUSPEND) {
2623 		mutex_exit(&sc->sc_tx_lock);
2624 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2625 		    IEEE80211_FC0_TYPE_DATA) {
2626 			freemsg(mp);
2627 		}
2628 		err = IWK_FAIL;
2629 		goto exit;
2630 	}
2631 
2632 	if (ring->queued > ring->count - 64) {
2633 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2634 		sc->sc_need_reschedule = 1;
2635 		mutex_exit(&sc->sc_tx_lock);
2636 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2637 		    IEEE80211_FC0_TYPE_DATA) {
2638 			freemsg(mp);
2639 		}
2640 		sc->sc_tx_nobuf++;
2641 		err = IWK_FAIL;
2642 		goto exit;
2643 	}
2644 	mutex_exit(&sc->sc_tx_lock);
2645 
2646 	hdrlen = sizeof (struct ieee80211_frame);
2647 
2648 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2649 	if (m == NULL) { /* can not alloc buf, drop this package */
2650 		cmn_err(CE_WARN,
2651 		    "iwk_send(): failed to allocate msgbuf\n");
2652 		freemsg(mp);
2653 		err = IWK_SUCCESS;
2654 		goto exit;
2655 	}
2656 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2657 		mblen = MBLKL(m0);
2658 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2659 		off += mblen;
2660 	}
2661 	m->b_wptr += off;
2662 	freemsg(mp);
2663 
2664 	wh = (struct ieee80211_frame *)m->b_rptr;
2665 
2666 	if (ic->ic_opmode == IEEE80211_M_IBSS &&
2667 	    (!(IEEE80211_IS_MULTICAST(wh->i_addr1)))) {
2668 		mutex_enter(&sc->sc_glock);
2669 		mutex_enter(&sc->sc_ibss.node_tb_lock);
2670 
2671 		/*
2672 		 * search for node in ibss node table
2673 		 */
2674 		for (index1 = IWK_STA_ID;
2675 		    index1 < IWK_STATION_COUNT; index1++) {
2676 			if (sc->sc_ibss.ibss_node_tb[index1].used &&
2677 			    IEEE80211_ADDR_EQ(sc->sc_ibss.
2678 			    ibss_node_tb[index1].node.bssid,
2679 			    wh->i_addr1)) {
2680 				break;
2681 			}
2682 		}
2683 
2684 		/*
2685 		 * if don't find in ibss node table
2686 		 */
2687 		if (index1 >= IWK_BROADCAST_ID) {
2688 			err = iwk_clean_add_node_ibss(ic,
2689 			    wh->i_addr1, &index2);
2690 			if (err != IWK_SUCCESS) {
2691 				cmn_err(CE_WARN, "iwk_send(): "
2692 				    "failed to clean all nodes "
2693 				    "and add one node\n");
2694 				mutex_exit(&sc->sc_ibss.node_tb_lock);
2695 				mutex_exit(&sc->sc_glock);
2696 				freemsg(m);
2697 				sc->sc_tx_err++;
2698 				err = IWK_SUCCESS;
2699 				goto exit;
2700 			}
2701 			index = index2;
2702 		} else {
2703 			index = index1;
2704 		}
2705 		mutex_exit(&sc->sc_ibss.node_tb_lock);
2706 		mutex_exit(&sc->sc_glock);
2707 	}
2708 
2709 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2710 	if (in == NULL) {
2711 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2712 		freemsg(m);
2713 		sc->sc_tx_err++;
2714 		err = IWK_SUCCESS;
2715 		goto exit;
2716 	}
2717 	(void) ieee80211_encap(ic, m, in);
2718 
2719 	cmd->hdr.type = REPLY_TX;
2720 	cmd->hdr.flags = 0;
2721 	cmd->hdr.qid = ring->qid;
2722 	cmd->hdr.idx = ring->cur;
2723 
2724 	tx = (iwk_tx_cmd_t *)cmd->data;
2725 	tx->tx_flags = 0;
2726 
2727 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2728 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2729 	} else {
2730 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2731 	}
2732 
2733 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2734 		k = ieee80211_crypto_encap(ic, m);
2735 		if (k == NULL) {
2736 			freemsg(m);
2737 			sc->sc_tx_err++;
2738 			err = IWK_SUCCESS;
2739 			goto exit;
2740 		}
2741 
2742 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2743 			tx->sec_ctl = 2; /* for CCMP */
2744 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2745 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2746 		}
2747 
2748 		/* packet header may have moved, reset our local pointer */
2749 		wh = (struct ieee80211_frame *)m->b_rptr;
2750 	}
2751 
2752 	len = msgdsize(m);
2753 
2754 #ifdef DEBUG
2755 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2756 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2757 #endif
2758 
2759 	/* pickup a rate */
2760 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2761 	    IEEE80211_FC0_TYPE_MGT) {
2762 		/* mgmt frames are sent at 1M */
2763 		rate = in->in_rates.ir_rates[0];
2764 	} else {
2765 		/*
2766 		 * do it here for the software way rate control.
2767 		 * later for rate scaling in hardware.
2768 		 * maybe like the following, for management frame:
2769 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2770 		 * for data frame:
2771 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2772 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2773 		 * tx->initial_rate_index = 1;
2774 		 *
2775 		 * now the txrate is determined in tx cmd flags, set to the
2776 		 * max value 54M for 11g and 11M for 11b.
2777 		 */
2778 
2779 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2780 			rate = ic->ic_fixed_rate;
2781 		} else {
2782 			rate = in->in_rates.ir_rates[in->in_txrate];
2783 		}
2784 	}
2785 	rate &= IEEE80211_RATE_VAL;
2786 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2787 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2788 
2789 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2790 
2791 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2792 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2793 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2794 
2795 	/* retrieve destination node's id */
2796 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2797 		tx->sta_id = IWK_BROADCAST_ID;
2798 	} else {
2799 		if (ic->ic_opmode == IEEE80211_M_IBSS)
2800 			tx->sta_id = index;
2801 		else
2802 			tx->sta_id = IWK_AP_ID;
2803 	}
2804 
2805 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2806 	    IEEE80211_FC0_TYPE_MGT) {
2807 		/* tell h/w to set timestamp in probe responses */
2808 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2809 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2810 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2811 
2812 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2813 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2814 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2815 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2816 			tx->timeout.pm_frame_timeout = 3;
2817 		else
2818 			tx->timeout.pm_frame_timeout = 2;
2819 	} else
2820 		tx->timeout.pm_frame_timeout = 0;
2821 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2822 		masks |= RATE_MCS_CCK_MSK;
2823 
2824 	masks |= RATE_MCS_ANT_B_MSK;
2825 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2826 
2827 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2828 	    tx->tx_flags));
2829 
2830 	tx->rts_retry_limit = 60;
2831 	tx->data_retry_limit = 15;
2832 
2833 	tx->stop_time.life_time  = LE_32(0xffffffff);
2834 
2835 	tx->len = LE_16(len);
2836 
2837 	tx->dram_lsb_ptr =
2838 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2839 	tx->dram_msb_ptr = 0;
2840 	tx->driver_txop = 0;
2841 	tx->next_frame_len = 0;
2842 
2843 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2844 	m->b_rptr += hdrlen;
2845 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2846 
2847 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2848 	    ring->qid, ring->cur, len));
2849 
2850 	/*
2851 	 * first segment includes the tx cmd plus the 802.11 header,
2852 	 * the second includes the remaining of the 802.11 frame.
2853 	 */
2854 	desc->val0 = LE_32(2 << 24);
2855 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2856 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2857 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2858 	desc->pa[0].val2 =
2859 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2860 	    ((len - hdrlen) << 20);
2861 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2862 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2863 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2864 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2865 
2866 	mutex_enter(&sc->sc_tx_lock);
2867 	ring->queued++;
2868 	mutex_exit(&sc->sc_tx_lock);
2869 
2870 	/* kick ring */
2871 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2872 	    tfd_offset[ring->cur].val = 8 + len;
2873 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2874 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2875 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2876 	}
2877 
2878 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2879 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2880 
2881 	ring->cur = (ring->cur + 1) % ring->count;
2882 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2883 	freemsg(m);
2884 	/* release node reference */
2885 	ieee80211_free_node(in);
2886 
2887 	ic->ic_stats.is_tx_bytes += len;
2888 	ic->ic_stats.is_tx_frags++;
2889 
2890 	if (sc->sc_tx_timer == 0)
2891 		sc->sc_tx_timer = 10;
2892 exit:
2893 	return (err);
2894 }
2895 
2896 static void
2897 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2898 {
2899 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2900 	ieee80211com_t	*ic = &sc->sc_ic;
2901 
2902 	enum ieee80211_opmode		oldmod;
2903 	iwk_tx_power_table_cmd_t	txpower;
2904 	iwk_add_sta_t			node;
2905 	iwk_link_quality_cmd_t		link_quality;
2906 	uint16_t			masks = 0;
2907 	int				i, err, err1;
2908 
2909 	oldmod = ic->ic_opmode;
2910 
2911 	mutex_enter(&sc->sc_glock);
2912 	if (sc->sc_flags & (IWK_F_SUSPEND | IWK_F_HW_ERR_RECOVER)) {
2913 		miocnak(wq, mp, 0, ENXIO);
2914 		mutex_exit(&sc->sc_glock);
2915 		return;
2916 	}
2917 	mutex_exit(&sc->sc_glock);
2918 
2919 	err = ieee80211_ioctl(ic, wq, mp);
2920 
2921 	/*
2922 	 * return to STA mode
2923 	 */
2924 	if ((0 == err || ENETRESET == err) && (oldmod != ic->ic_opmode) &&
2925 	    (ic->ic_opmode == IEEE80211_M_STA)) {
2926 		/* configure rxon */
2927 		(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
2928 		IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
2929 		IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
2930 		sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
2931 		sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
2932 		    RXON_FLG_AUTO_DETECT_MSK |
2933 		    RXON_FLG_BAND_24G_MSK);
2934 		sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
2935 		switch (ic->ic_opmode) {
2936 		case IEEE80211_M_STA:
2937 			sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
2938 			sc->sc_config.filter_flags |=
2939 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2940 			    RXON_FILTER_DIS_DECRYPT_MSK |
2941 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2942 			break;
2943 		case IEEE80211_M_IBSS:
2944 		case IEEE80211_M_AHDEMO:
2945 			sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
2946 			sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2947 			sc->sc_config.filter_flags =
2948 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2949 			    RXON_FILTER_DIS_DECRYPT_MSK |
2950 			    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2951 			break;
2952 		case IEEE80211_M_HOSTAP:
2953 			sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
2954 			break;
2955 		case IEEE80211_M_MONITOR:
2956 			sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
2957 			sc->sc_config.filter_flags |=
2958 			    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2959 			    RXON_FILTER_CTL2HOST_MSK |
2960 			    RXON_FILTER_PROMISC_MSK);
2961 			break;
2962 		}
2963 		sc->sc_config.cck_basic_rates  = 0x0f;
2964 		sc->sc_config.ofdm_basic_rates = 0xff;
2965 		sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
2966 		sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
2967 		/* set antenna */
2968 		mutex_enter(&sc->sc_glock);
2969 		sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
2970 		    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
2971 		    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
2972 		    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
2973 		err1 = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
2974 		    sizeof (iwk_rxon_cmd_t), 1);
2975 		if (err1 != IWK_SUCCESS) {
2976 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
2977 			    "failed to set configure command"
2978 			    " please run (ifconfig unplumb and"
2979 			    " ifconfig plumb)\n");
2980 		}
2981 		/*
2982 		 * set Tx power for 2.4GHz channels
2983 		 * (need further investigation. fix tx power at present)
2984 		 */
2985 		(void) memset(&txpower, 0, sizeof (txpower));
2986 		txpower.band = 1; /* for 2.4G */
2987 		txpower.channel = sc->sc_config.chan;
2988 		txpower.channel_normal_width = 0;
2989 		for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
2990 			txpower.tx_power.ht_ofdm_power[i].
2991 			    s.ramon_tx_gain = 0x3f3f;
2992 			txpower.tx_power.ht_ofdm_power[i].
2993 			    s.dsp_predis_atten = 110 | (110 << 8);
2994 		}
2995 		txpower.tx_power.legacy_cck_power.s.ramon_tx_gain = 0x3f3f;
2996 		txpower.tx_power.legacy_cck_power.s.dsp_predis_atten
2997 		    = 110 | (110 << 8);
2998 		err1 = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
2999 		    sizeof (txpower), 1);
3000 		if (err1 != IWK_SUCCESS) {
3001 			cmn_err(CE_WARN, "iwk_m_ioctl(): failed to set txpower"
3002 			    " please run (ifconfig unplumb "
3003 			    "and ifconfig plumb)\n");
3004 		}
3005 		/* add broadcast node so that we can send broadcast frame */
3006 		(void) memset(&node, 0, sizeof (node));
3007 		(void) memset(node.bssid, 0xff, 6);
3008 		node.id = IWK_BROADCAST_ID;
3009 		err1 = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3010 		if (err1 != IWK_SUCCESS) {
3011 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
3012 			    "failed to add broadcast node\n");
3013 		}
3014 
3015 		/* TX_LINK_QUALITY cmd */
3016 		(void) memset(&link_quality, 0, sizeof (link_quality));
3017 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3018 			masks |= RATE_MCS_CCK_MSK;
3019 			masks |= RATE_MCS_ANT_B_MSK;
3020 			masks &= ~RATE_MCS_ANT_A_MSK;
3021 			link_quality.rate_n_flags[i] =
3022 			    iwk_rate_to_plcp(2) | masks;
3023 		}
3024 		link_quality.general_params.single_stream_ant_msk = 2;
3025 		link_quality.general_params.dual_stream_ant_msk = 3;
3026 		link_quality.agg_params.agg_dis_start_th = 3;
3027 		link_quality.agg_params.agg_time_limit = LE_16(4000);
3028 		link_quality.sta_id = IWK_BROADCAST_ID;
3029 		err1 = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3030 		    sizeof (link_quality), 1);
3031 		if (err1 != IWK_SUCCESS) {
3032 			cmn_err(CE_WARN, "iwk_m_ioctl(): "
3033 			    "failed to config link quality table\n");
3034 		}
3035 		mutex_exit(&sc->sc_glock);
3036 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3037 	}
3038 
3039 	if (err == ENETRESET) {
3040 		/*
3041 		 * This is special for the hidden AP connection.
3042 		 * In any case, we should make sure only one 'scan'
3043 		 * in the driver for a 'connect' CLI command. So
3044 		 * when connecting to a hidden AP, the scan is just
3045 		 * sent out to the air when we know the desired
3046 		 * essid of the AP we want to connect.
3047 		 */
3048 		if (ic->ic_des_esslen) {
3049 			if (sc->sc_flags & IWK_F_RUNNING) {
3050 				iwk_m_stop(sc);
3051 				(void) iwk_m_start(sc);
3052 				(void) ieee80211_new_state(ic,
3053 				    IEEE80211_S_SCAN, -1);
3054 			}
3055 		}
3056 	}
3057 }
3058 
3059 /*
3060  * callback functions for set/get properties
3061  */
3062 /* ARGSUSED */
3063 static int
3064 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3065     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
3066 {
3067 	int		err = 0;
3068 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3069 
3070 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3071 	    pr_flags, wldp_length, wldp_buf, perm);
3072 
3073 	return (err);
3074 }
3075 static int
3076 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3077     uint_t wldp_length, const void *wldp_buf)
3078 {
3079 	int		err;
3080 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
3081 	ieee80211com_t	*ic = &sc->sc_ic;
3082 
3083 	mutex_enter(&sc->sc_glock);
3084 	if (sc->sc_flags & (IWK_F_SUSPEND | IWK_F_HW_ERR_RECOVER)) {
3085 		mutex_exit(&sc->sc_glock);
3086 		return (ENXIO);
3087 	}
3088 	mutex_exit(&sc->sc_glock);
3089 
3090 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3091 	    wldp_buf);
3092 
3093 	if (err == ENETRESET) {
3094 		if (ic->ic_des_esslen) {
3095 			if (sc->sc_flags & IWK_F_RUNNING) {
3096 				iwk_m_stop(sc);
3097 				(void) iwk_m_start(sc);
3098 				(void) ieee80211_new_state(ic,
3099 				    IEEE80211_S_SCAN, -1);
3100 			}
3101 		}
3102 		err = 0;
3103 	}
3104 
3105 	return (err);
3106 }
3107 
3108 /*ARGSUSED*/
3109 static int
3110 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
3111 {
3112 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
3113 	ieee80211com_t	*ic = &sc->sc_ic;
3114 	ieee80211_node_t *in;
3115 
3116 	mutex_enter(&sc->sc_glock);
3117 	switch (stat) {
3118 	case MAC_STAT_IFSPEED:
3119 		in = ic->ic_bss;
3120 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
3121 		    IEEE80211_RATE(in->in_txrate) :
3122 		    ic->ic_fixed_rate) / 2 * 1000000;
3123 		break;
3124 	case MAC_STAT_NOXMTBUF:
3125 		*val = sc->sc_tx_nobuf;
3126 		break;
3127 	case MAC_STAT_NORCVBUF:
3128 		*val = sc->sc_rx_nobuf;
3129 		break;
3130 	case MAC_STAT_IERRORS:
3131 		*val = sc->sc_rx_err;
3132 		break;
3133 	case MAC_STAT_RBYTES:
3134 		*val = ic->ic_stats.is_rx_bytes;
3135 		break;
3136 	case MAC_STAT_IPACKETS:
3137 		*val = ic->ic_stats.is_rx_frags;
3138 		break;
3139 	case MAC_STAT_OBYTES:
3140 		*val = ic->ic_stats.is_tx_bytes;
3141 		break;
3142 	case MAC_STAT_OPACKETS:
3143 		*val = ic->ic_stats.is_tx_frags;
3144 		break;
3145 	case MAC_STAT_OERRORS:
3146 	case WIFI_STAT_TX_FAILED:
3147 		*val = sc->sc_tx_err;
3148 		break;
3149 	case WIFI_STAT_TX_RETRANS:
3150 		*val = sc->sc_tx_retries;
3151 		break;
3152 	case WIFI_STAT_FCS_ERRORS:
3153 	case WIFI_STAT_WEP_ERRORS:
3154 	case WIFI_STAT_TX_FRAGS:
3155 	case WIFI_STAT_MCAST_TX:
3156 	case WIFI_STAT_RTS_SUCCESS:
3157 	case WIFI_STAT_RTS_FAILURE:
3158 	case WIFI_STAT_ACK_FAILURE:
3159 	case WIFI_STAT_RX_FRAGS:
3160 	case WIFI_STAT_MCAST_RX:
3161 	case WIFI_STAT_RX_DUPS:
3162 		mutex_exit(&sc->sc_glock);
3163 		return (ieee80211_stat(ic, stat, val));
3164 	default:
3165 		mutex_exit(&sc->sc_glock);
3166 		return (ENOTSUP);
3167 	}
3168 	mutex_exit(&sc->sc_glock);
3169 
3170 	return (IWK_SUCCESS);
3171 
3172 }
3173 
3174 static int
3175 iwk_m_start(void *arg)
3176 {
3177 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3178 	ieee80211com_t	*ic = &sc->sc_ic;
3179 	int err;
3180 
3181 	err = iwk_init(sc);
3182 
3183 	if (err != IWK_SUCCESS) {
3184 		/*
3185 		 * The hw init err(eg. RF is OFF). Return Success to make
3186 		 * the 'plumb' succeed. The iwk_thread() tries to re-init
3187 		 * background.
3188 		 */
3189 		cmn_err(CE_WARN, "iwk_m_start(): failed to initialize "
3190 		    "hardware\n");
3191 		mutex_enter(&sc->sc_glock);
3192 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3193 		mutex_exit(&sc->sc_glock);
3194 		return (IWK_SUCCESS);
3195 	}
3196 
3197 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3198 
3199 	mutex_enter(&sc->sc_glock);
3200 	sc->sc_flags |= IWK_F_RUNNING;
3201 	mutex_exit(&sc->sc_glock);
3202 
3203 	return (IWK_SUCCESS);
3204 }
3205 
3206 static void
3207 iwk_m_stop(void *arg)
3208 {
3209 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3210 	ieee80211com_t	*ic = &sc->sc_ic;
3211 
3212 	iwk_stop(sc);
3213 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3214 	ieee80211_stop_watchdog(ic);
3215 	mutex_enter(&sc->sc_mt_lock);
3216 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3217 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
3218 	mutex_exit(&sc->sc_mt_lock);
3219 	mutex_enter(&sc->sc_glock);
3220 	sc->sc_flags &= ~IWK_F_RUNNING;
3221 	mutex_exit(&sc->sc_glock);
3222 }
3223 
3224 /*ARGSUSED*/
3225 static int
3226 iwk_m_unicst(void *arg, const uint8_t *macaddr)
3227 {
3228 	iwk_sc_t *sc = (iwk_sc_t *)arg;
3229 	ieee80211com_t	*ic = &sc->sc_ic;
3230 	int err;
3231 
3232 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3233 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3234 		mutex_enter(&sc->sc_glock);
3235 		err = iwk_config(sc);
3236 		mutex_exit(&sc->sc_glock);
3237 		if (err != IWK_SUCCESS) {
3238 			cmn_err(CE_WARN,
3239 			    "iwk_m_unicst(): "
3240 			    "failed to configure device\n");
3241 			goto fail;
3242 		}
3243 	}
3244 	return (IWK_SUCCESS);
3245 fail:
3246 	return (err);
3247 }
3248 
3249 /*ARGSUSED*/
3250 static int
3251 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3252 {
3253 	return (IWK_SUCCESS);
3254 }
3255 
3256 /*ARGSUSED*/
3257 static int
3258 iwk_m_promisc(void *arg, boolean_t on)
3259 {
3260 	return (IWK_SUCCESS);
3261 }
3262 
3263 static void
3264 iwk_thread(iwk_sc_t *sc)
3265 {
3266 	ieee80211com_t	*ic = &sc->sc_ic;
3267 	clock_t clk;
3268 	int times = 0, err, n = 0, timeout = 0;
3269 	uint32_t tmp;
3270 
3271 	mutex_enter(&sc->sc_mt_lock);
3272 	while (sc->sc_mf_thread_switch) {
3273 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
3274 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3275 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
3276 		} else {
3277 			sc->sc_flags |= IWK_F_RADIO_OFF;
3278 		}
3279 		/*
3280 		 * If in SUSPEND or the RF is OFF, do nothing
3281 		 */
3282 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
3283 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
3284 			mutex_exit(&sc->sc_mt_lock);
3285 			delay(drv_usectohz(100000));
3286 			mutex_enter(&sc->sc_mt_lock);
3287 			continue;
3288 		}
3289 
3290 		/*
3291 		 * recovery fatal error
3292 		 */
3293 		if (ic->ic_mach &&
3294 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
3295 
3296 			IWK_DBG((IWK_DEBUG_FW,
3297 			    "iwk_thread(): "
3298 			    "try to recover fatal hw error: %d\n", times++));
3299 
3300 			iwk_stop(sc);
3301 
3302 			mutex_exit(&sc->sc_mt_lock);
3303 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3304 			delay(drv_usectohz(2000000 + n*500000));
3305 			mutex_enter(&sc->sc_mt_lock);
3306 
3307 			err = iwk_init(sc);
3308 			if (err != IWK_SUCCESS) {
3309 				n++;
3310 				if (n < 20)
3311 					continue;
3312 			}
3313 			n = 0;
3314 			if (!err)
3315 				sc->sc_flags |= IWK_F_RUNNING;
3316 			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3317 			mutex_exit(&sc->sc_mt_lock);
3318 			delay(drv_usectohz(2000000));
3319 			if (sc->sc_ostate != IEEE80211_S_INIT)
3320 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
3321 			mutex_enter(&sc->sc_mt_lock);
3322 		}
3323 
3324 		if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) {
3325 			IWK_DBG((IWK_DEBUG_RESUME,
3326 			    "iwk_thread(): "
3327 			    "lazy resume\n"));
3328 			sc->sc_flags &= ~IWK_F_LAZY_RESUME;
3329 			mutex_exit(&sc->sc_mt_lock);
3330 			/*
3331 			 * NB: under WPA mode, this call hangs (door problem?)
3332 			 * when called in iwk_attach() and iwk_detach() while
3333 			 * system is in the procedure of CPR. To be safe, let
3334 			 * the thread do this.
3335 			 */
3336 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3337 			mutex_enter(&sc->sc_mt_lock);
3338 		}
3339 
3340 		if (ic->ic_mach &&
3341 		    (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3342 			IWK_DBG((IWK_DEBUG_SCAN,
3343 			    "iwk_thread(): "
3344 			    "wait for probe response\n"));
3345 			sc->sc_scan_pending--;
3346 			mutex_exit(&sc->sc_mt_lock);
3347 			delay(drv_usectohz(200000));
3348 			if (sc->sc_flags & IWK_F_SCANNING)
3349 				ieee80211_next_scan(ic);
3350 			mutex_enter(&sc->sc_mt_lock);
3351 		}
3352 
3353 		/*
3354 		 * rate ctl
3355 		 */
3356 		if (ic->ic_mach &&
3357 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3358 			clk = ddi_get_lbolt();
3359 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
3360 				iwk_amrr_timeout(sc);
3361 			}
3362 		}
3363 
3364 		mutex_exit(&sc->sc_mt_lock);
3365 		delay(drv_usectohz(100000));
3366 		mutex_enter(&sc->sc_mt_lock);
3367 
3368 		if (sc->sc_tx_timer) {
3369 			timeout++;
3370 			if (timeout == 10) {
3371 				sc->sc_tx_timer--;
3372 				if (sc->sc_tx_timer == 0) {
3373 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3374 					sc->sc_ostate = IEEE80211_S_RUN;
3375 					IWK_DBG((IWK_DEBUG_FW,
3376 					    "iwk_thread(): try to recover from"
3377 					    " 'send fail\n"));
3378 				}
3379 				timeout = 0;
3380 			}
3381 		}
3382 
3383 	}
3384 	sc->sc_mf_thread = NULL;
3385 	cv_signal(&sc->sc_mt_cv);
3386 	mutex_exit(&sc->sc_mt_lock);
3387 }
3388 
3389 
3390 /*
3391  * Send a command to the firmware.
3392  */
3393 static int
3394 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3395 {
3396 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3397 	iwk_tx_desc_t *desc;
3398 	iwk_cmd_t *cmd;
3399 	clock_t clk;
3400 
3401 	ASSERT(size <= sizeof (cmd->data));
3402 	ASSERT(mutex_owned(&sc->sc_glock));
3403 
3404 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3405 	desc = ring->data[ring->cur].desc;
3406 	cmd = ring->data[ring->cur].cmd;
3407 
3408 	cmd->hdr.type = (uint8_t)code;
3409 	cmd->hdr.flags = 0;
3410 	cmd->hdr.qid = ring->qid;
3411 	cmd->hdr.idx = ring->cur;
3412 	(void) memcpy(cmd->data, buf, size);
3413 	(void) memset(desc, 0, sizeof (*desc));
3414 
3415 	desc->val0 = LE_32(1 << 24);
3416 	desc->pa[0].tb1_addr =
3417 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3418 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3419 
3420 	/* kick cmd ring XXX */
3421 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3422 	    tfd_offset[ring->cur].val = 8;
3423 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3424 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3425 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3426 	}
3427 	ring->cur = (ring->cur + 1) % ring->count;
3428 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3429 
3430 	if (async)
3431 		return (IWK_SUCCESS);
3432 	else {
3433 		sc->sc_flags &= ~IWK_F_CMD_DONE;
3434 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3435 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3436 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3437 			    0)
3438 				break;
3439 		}
3440 		if (sc->sc_flags & IWK_F_CMD_DONE)
3441 			return (IWK_SUCCESS);
3442 		else
3443 			return (IWK_FAIL);
3444 	}
3445 }
3446 
3447 static void
3448 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3449 {
3450 	iwk_led_cmd_t led;
3451 
3452 	led.interval = LE_32(100000);	/* unit: 100ms */
3453 	led.id = id;
3454 	led.off = off;
3455 	led.on = on;
3456 
3457 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3458 }
3459 
3460 static int
3461 iwk_hw_set_before_auth(iwk_sc_t *sc)
3462 {
3463 	ieee80211com_t *ic = &sc->sc_ic;
3464 	ieee80211_node_t *in = ic->ic_bss;
3465 	iwk_add_sta_t node;
3466 	iwk_link_quality_cmd_t link_quality;
3467 	struct ieee80211_rateset rs;
3468 	uint16_t masks = 0, rate;
3469 	int i, err;
3470 
3471 	if (in->in_chan == IEEE80211_CHAN_ANYC) {
3472 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3473 		    "channel (%d) isn't in proper range\n",
3474 		    ieee80211_chan2ieee(ic, in->in_chan));
3475 		return (IWK_FAIL);
3476 	}
3477 
3478 	/* update adapter's configuration according the info of target AP */
3479 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3480 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
3481 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
3482 		sc->sc_config.cck_basic_rates  = 0x03;
3483 		sc->sc_config.ofdm_basic_rates = 0;
3484 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3485 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3486 		sc->sc_config.cck_basic_rates  = 0;
3487 		sc->sc_config.ofdm_basic_rates = 0x15;
3488 	} else { /* assume 802.11b/g */
3489 		sc->sc_config.cck_basic_rates  = 0x0f;
3490 		sc->sc_config.ofdm_basic_rates = 0xff;
3491 	}
3492 
3493 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3494 	    RXON_FLG_SHORT_SLOT_MSK);
3495 
3496 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3497 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3498 	else
3499 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3500 
3501 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3502 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3503 	else
3504 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3505 
3506 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3507 	    "filter_flags %x  cck %x ofdm %x"
3508 	    " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3509 	    sc->sc_config.chan, sc->sc_config.flags,
3510 	    sc->sc_config.filter_flags,
3511 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3512 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3513 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3514 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3515 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3516 	    sizeof (iwk_rxon_cmd_t), 1);
3517 	if (err != IWK_SUCCESS) {
3518 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3519 		    " failed to config chan%d\n",
3520 		    sc->sc_config.chan);
3521 		return (err);
3522 	}
3523 
3524 	/* obtain current temperature of chipset */
3525 	sc->sc_tempera = iwk_curr_tempera(sc);
3526 
3527 	/* make Tx power calibration to determine the gains of DSP and radio */
3528 	err = iwk_tx_power_calibration(sc);
3529 	if (err) {
3530 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3531 		    "failed to set tx power table\n");
3532 		return (err);
3533 	}
3534 
3535 	/* add default AP node */
3536 	(void) memset(&node, 0, sizeof (node));
3537 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3538 	node.id = IWK_AP_ID;
3539 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3540 	if (err != IWK_SUCCESS) {
3541 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3542 		    "failed to add BSS node\n");
3543 		return (err);
3544 	}
3545 
3546 	/* TX_LINK_QUALITY cmd */
3547 	(void) memset(&link_quality, 0, sizeof (link_quality));
3548 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3549 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3550 		if (i < rs.ir_nrates)
3551 			rate = rs.ir_rates[rs.ir_nrates - i];
3552 		else
3553 			rate = 2;
3554 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3555 			masks |= RATE_MCS_CCK_MSK;
3556 		masks |= RATE_MCS_ANT_B_MSK;
3557 		masks &= ~RATE_MCS_ANT_A_MSK;
3558 		link_quality.rate_n_flags[i] =
3559 		    iwk_rate_to_plcp(rate) | masks;
3560 	}
3561 
3562 	link_quality.general_params.single_stream_ant_msk = 2;
3563 	link_quality.general_params.dual_stream_ant_msk = 3;
3564 	link_quality.agg_params.agg_dis_start_th = 3;
3565 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3566 	link_quality.sta_id = IWK_AP_ID;
3567 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3568 	    sizeof (link_quality), 1);
3569 	if (err != IWK_SUCCESS) {
3570 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3571 		    "failed to config link quality table\n");
3572 		return (err);
3573 	}
3574 
3575 	return (IWK_SUCCESS);
3576 }
3577 
3578 /*
3579  * Send a scan request(assembly scan cmd) to the firmware.
3580  */
3581 static int
3582 iwk_scan(iwk_sc_t *sc)
3583 {
3584 	ieee80211com_t *ic = &sc->sc_ic;
3585 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3586 	iwk_tx_desc_t *desc;
3587 	iwk_tx_data_t *data;
3588 	iwk_cmd_t *cmd;
3589 	iwk_scan_hdr_t *hdr;
3590 	iwk_scan_chan_t *chan;
3591 	struct ieee80211_frame *wh;
3592 	ieee80211_node_t *in = ic->ic_bss;
3593 	uint8_t essid[IEEE80211_NWID_LEN+1];
3594 	struct ieee80211_rateset *rs;
3595 	enum ieee80211_phymode mode;
3596 	uint8_t *frm;
3597 	int i, pktlen, nrates;
3598 
3599 	data = &ring->data[ring->cur];
3600 	desc = data->desc;
3601 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3602 
3603 	cmd->hdr.type = REPLY_SCAN_CMD;
3604 	cmd->hdr.flags = 0;
3605 	cmd->hdr.qid = ring->qid;
3606 	cmd->hdr.idx = ring->cur | 0x40;
3607 
3608 	hdr = (iwk_scan_hdr_t *)cmd->data;
3609 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3610 	hdr->nchan = 1;
3611 	hdr->quiet_time = LE_16(50);
3612 	hdr->quiet_plcp_th = LE_16(1);
3613 
3614 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3615 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3616 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3617 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3618 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3619 
3620 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3621 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3622 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3623 	hdr->tx_cmd.tx_flags |= (0x200);
3624 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3625 	hdr->tx_cmd.rate.r.rate_n_flags |=
3626 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3627 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3628 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3629 
3630 	if (ic->ic_des_esslen) {
3631 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3632 		essid[ic->ic_des_esslen] = '\0';
3633 		IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3634 
3635 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3636 		    ic->ic_des_esslen);
3637 	} else {
3638 		bzero(hdr->direct_scan[0].ssid,
3639 		    sizeof (hdr->direct_scan[0].ssid));
3640 	}
3641 	/*
3642 	 * a probe request frame is required after the REPLY_SCAN_CMD
3643 	 */
3644 	wh = (struct ieee80211_frame *)(hdr + 1);
3645 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3646 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3647 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3648 	(void) memset(wh->i_addr1, 0xff, 6);
3649 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3650 	(void) memset(wh->i_addr3, 0xff, 6);
3651 	*(uint16_t *)&wh->i_dur[0] = 0;
3652 	*(uint16_t *)&wh->i_seq[0] = 0;
3653 
3654 	frm = (uint8_t *)(wh + 1);
3655 
3656 	/* essid IE */
3657 	if (in->in_esslen) {
3658 		bcopy(in->in_essid, essid, in->in_esslen);
3659 		essid[in->in_esslen] = '\0';
3660 		IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3661 		    essid));
3662 	}
3663 	*frm++ = IEEE80211_ELEMID_SSID;
3664 	*frm++ = in->in_esslen;
3665 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3666 	frm += in->in_esslen;
3667 
3668 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3669 	rs = &ic->ic_sup_rates[mode];
3670 
3671 	/* supported rates IE */
3672 	*frm++ = IEEE80211_ELEMID_RATES;
3673 	nrates = rs->ir_nrates;
3674 	if (nrates > IEEE80211_RATE_SIZE)
3675 		nrates = IEEE80211_RATE_SIZE;
3676 	*frm++ = (uint8_t)nrates;
3677 	(void) memcpy(frm, rs->ir_rates, nrates);
3678 	frm += nrates;
3679 
3680 	/* supported xrates IE */
3681 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3682 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3683 		*frm++ = IEEE80211_ELEMID_XRATES;
3684 		*frm++ = (uint8_t)nrates;
3685 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3686 		frm += nrates;
3687 	}
3688 
3689 	/* optionnal IE (usually for wpa) */
3690 	if (ic->ic_opt_ie != NULL) {
3691 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3692 		frm += ic->ic_opt_ie_len;
3693 	}
3694 
3695 	/* setup length of probe request */
3696 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3697 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3698 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3699 
3700 	/*
3701 	 * the attribute of the scan channels are required after the probe
3702 	 * request frame.
3703 	 */
3704 	chan = (iwk_scan_chan_t *)frm;
3705 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3706 		if (ic->ic_des_esslen) {
3707 			chan->type = 3;
3708 		} else {
3709 			chan->type = 1;
3710 		}
3711 
3712 		chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3713 		chan->tpc.tx_gain = 0x3f;
3714 		chan->tpc.dsp_atten = 110;
3715 		chan->active_dwell = LE_16(50);
3716 		chan->passive_dwell = LE_16(120);
3717 
3718 		frm += sizeof (iwk_scan_chan_t);
3719 	}
3720 
3721 	pktlen = _PTRDIFF(frm, cmd);
3722 
3723 	(void) memset(desc, 0, sizeof (*desc));
3724 	desc->val0 = LE_32(1 << 24);
3725 	desc->pa[0].tb1_addr =
3726 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3727 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3728 
3729 	/*
3730 	 * maybe for cmd, filling the byte cnt table is not necessary.
3731 	 * anyway, we fill it here.
3732 	 */
3733 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3734 	    tfd_offset[ring->cur].val = 8;
3735 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3736 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3737 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3738 	}
3739 
3740 	/* kick cmd ring */
3741 	ring->cur = (ring->cur + 1) % ring->count;
3742 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3743 
3744 	return (IWK_SUCCESS);
3745 }
3746 
3747 static int
3748 iwk_config(iwk_sc_t *sc)
3749 {
3750 	ieee80211com_t *ic = &sc->sc_ic;
3751 	iwk_powertable_cmd_t powertable;
3752 	iwk_bt_cmd_t bt;
3753 	iwk_add_sta_t node;
3754 	iwk_link_quality_cmd_t link_quality;
3755 	int i, err;
3756 	uint16_t masks = 0;
3757 
3758 	/*
3759 	 * set power mode. Disable power management at present, do it later
3760 	 */
3761 	(void) memset(&powertable, 0, sizeof (powertable));
3762 	powertable.flags = LE_16(0x8);
3763 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3764 	    sizeof (powertable), 0);
3765 	if (err != IWK_SUCCESS) {
3766 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3767 		return (err);
3768 	}
3769 
3770 	/* configure bt coexistence */
3771 	(void) memset(&bt, 0, sizeof (bt));
3772 	bt.flags = 3;
3773 	bt.lead_time = 0xaa;
3774 	bt.max_kill = 1;
3775 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3776 	    sizeof (bt), 0);
3777 	if (err != IWK_SUCCESS) {
3778 		cmn_err(CE_WARN,
3779 		    "iwk_config(): "
3780 		    "failed to configurate bt coexistence\n");
3781 		return (err);
3782 	}
3783 
3784 	/* configure rxon */
3785 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3786 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3787 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3788 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3789 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3790 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3791 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3792 	switch (ic->ic_opmode) {
3793 	case IEEE80211_M_STA:
3794 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3795 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3796 		    RXON_FILTER_DIS_DECRYPT_MSK |
3797 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3798 		break;
3799 	case IEEE80211_M_IBSS:
3800 	case IEEE80211_M_AHDEMO:
3801 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3802 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3803 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3804 		    RXON_FILTER_DIS_DECRYPT_MSK |
3805 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3806 		break;
3807 	case IEEE80211_M_HOSTAP:
3808 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3809 		break;
3810 	case IEEE80211_M_MONITOR:
3811 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3812 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3813 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3814 		break;
3815 	}
3816 	sc->sc_config.cck_basic_rates  = 0x0f;
3817 	sc->sc_config.ofdm_basic_rates = 0xff;
3818 
3819 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3820 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3821 
3822 	/* set antenna */
3823 
3824 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3825 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3826 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3827 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3828 
3829 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3830 	    sizeof (iwk_rxon_cmd_t), 0);
3831 	if (err != IWK_SUCCESS) {
3832 		cmn_err(CE_WARN, "iwk_config(): "
3833 		    "failed to set configure command\n");
3834 		return (err);
3835 	}
3836 	/* obtain current temperature of chipset */
3837 	sc->sc_tempera = iwk_curr_tempera(sc);
3838 
3839 	/* make Tx power calibration to determine the gains of DSP and radio */
3840 	err = iwk_tx_power_calibration(sc);
3841 	if (err) {
3842 		cmn_err(CE_WARN, "iwk_config(): "
3843 		    "failed to set tx power table\n");
3844 		return (err);
3845 	}
3846 
3847 	/* add broadcast node so that we can send broadcast frame */
3848 	(void) memset(&node, 0, sizeof (node));
3849 	(void) memset(node.bssid, 0xff, 6);
3850 	node.id = IWK_BROADCAST_ID;
3851 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3852 	if (err != IWK_SUCCESS) {
3853 		cmn_err(CE_WARN, "iwk_config(): "
3854 		    "failed to add broadcast node\n");
3855 		return (err);
3856 	}
3857 
3858 	/* TX_LINK_QUALITY cmd ? */
3859 	(void) memset(&link_quality, 0, sizeof (link_quality));
3860 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3861 		masks |= RATE_MCS_CCK_MSK;
3862 		masks |= RATE_MCS_ANT_B_MSK;
3863 		masks &= ~RATE_MCS_ANT_A_MSK;
3864 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3865 	}
3866 
3867 	link_quality.general_params.single_stream_ant_msk = 2;
3868 	link_quality.general_params.dual_stream_ant_msk = 3;
3869 	link_quality.agg_params.agg_dis_start_th = 3;
3870 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3871 	link_quality.sta_id = IWK_BROADCAST_ID;
3872 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3873 	    sizeof (link_quality), 0);
3874 	if (err != IWK_SUCCESS) {
3875 		cmn_err(CE_WARN, "iwk_config(): "
3876 		    "failed to config link quality table\n");
3877 		return (err);
3878 	}
3879 
3880 	return (IWK_SUCCESS);
3881 }
3882 
3883 static void
3884 iwk_stop_master(iwk_sc_t *sc)
3885 {
3886 	uint32_t tmp;
3887 	int n;
3888 
3889 	tmp = IWK_READ(sc, CSR_RESET);
3890 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3891 
3892 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3893 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3894 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3895 		return;
3896 
3897 	for (n = 0; n < 2000; n++) {
3898 		if (IWK_READ(sc, CSR_RESET) &
3899 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3900 			break;
3901 		DELAY(1000);
3902 	}
3903 	if (n == 2000)
3904 		IWK_DBG((IWK_DEBUG_HW,
3905 		    "timeout waiting for master stop\n"));
3906 }
3907 
3908 static int
3909 iwk_power_up(iwk_sc_t *sc)
3910 {
3911 	uint32_t tmp;
3912 
3913 	iwk_mac_access_enter(sc);
3914 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3915 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3916 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3917 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3918 	iwk_mac_access_exit(sc);
3919 
3920 	DELAY(5000);
3921 	return (IWK_SUCCESS);
3922 }
3923 
3924 static int
3925 iwk_preinit(iwk_sc_t *sc)
3926 {
3927 	uint32_t tmp;
3928 	int n;
3929 	uint8_t vlink;
3930 
3931 	/* clear any pending interrupts */
3932 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3933 
3934 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3935 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3936 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3937 
3938 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3939 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3940 
3941 	/* wait for clock ready */
3942 	for (n = 0; n < 1000; n++) {
3943 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3944 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3945 			break;
3946 		DELAY(10);
3947 	}
3948 	if (n == 1000) {
3949 		cmn_err(CE_WARN,
3950 		    "iwk_preinit(): timeout waiting for clock ready\n");
3951 		return (ETIMEDOUT);
3952 	}
3953 	iwk_mac_access_enter(sc);
3954 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3955 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3956 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3957 
3958 	DELAY(20);
3959 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3960 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3961 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3962 	iwk_mac_access_exit(sc);
3963 
3964 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3965 
3966 	(void) iwk_power_up(sc);
3967 
3968 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3969 		tmp = ddi_get32(sc->sc_cfg_handle,
3970 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3971 		ddi_put32(sc->sc_cfg_handle,
3972 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3973 		    tmp & ~(1 << 11));
3974 	}
3975 
3976 
3977 	vlink = ddi_get8(sc->sc_cfg_handle,
3978 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3979 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3980 	    vlink & ~2);
3981 
3982 	tmp = IWK_READ(sc, CSR_SW_VER);
3983 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3984 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3985 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3986 	IWK_WRITE(sc, CSR_SW_VER, tmp);
3987 
3988 	/* make sure power supply on each part of the hardware */
3989 	iwk_mac_access_enter(sc);
3990 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3991 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3992 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3993 	DELAY(5);
3994 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3995 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3996 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3997 	iwk_mac_access_exit(sc);
3998 	return (IWK_SUCCESS);
3999 }
4000 
4001 /*
4002  * set up semphore flag to own EEPROM
4003  */
4004 static int iwk_eep_sem_down(iwk_sc_t *sc)
4005 {
4006 	int count1, count2;
4007 	uint32_t tmp;
4008 
4009 	for (count1 = 0; count1 < 1000; count1++) {
4010 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4011 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4012 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4013 
4014 		for (count2 = 0; count2 < 2; count2++) {
4015 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
4016 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
4017 				return (IWK_SUCCESS);
4018 			DELAY(10000);
4019 		}
4020 	}
4021 	return (IWK_FAIL);
4022 }
4023 
4024 /*
4025  * reset semphore flag to release EEPROM
4026  */
4027 static void iwk_eep_sem_up(iwk_sc_t *sc)
4028 {
4029 	uint32_t tmp;
4030 
4031 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4032 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4033 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4034 }
4035 
4036 /*
4037  * This function load all infomation in eeprom into iwk_eep
4038  * structure in iwk_sc_t structure
4039  */
4040 static int iwk_eep_load(iwk_sc_t *sc)
4041 {
4042 	int i, rr;
4043 	uint32_t rv, tmp, eep_gp;
4044 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4045 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4046 
4047 	/* read eeprom gp register in CSR */
4048 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
4049 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4050 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
4051 		cmn_err(CE_WARN, "EEPROM not found\n");
4052 		return (IWK_FAIL);
4053 	}
4054 
4055 	rr = iwk_eep_sem_down(sc);
4056 	if (rr != 0) {
4057 		cmn_err(CE_WARN, "failed to own EEPROM\n");
4058 		return (IWK_FAIL);
4059 	}
4060 
4061 	for (addr = 0; addr < eep_sz; addr += 2) {
4062 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4063 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
4064 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4065 
4066 		for (i = 0; i < 10; i++) {
4067 			rv = IWK_READ(sc, CSR_EEPROM_REG);
4068 			if (rv & 1)
4069 				break;
4070 			DELAY(10);
4071 		}
4072 
4073 		if (!(rv & 1)) {
4074 			cmn_err(CE_WARN, "time out when read EEPROM\n");
4075 			iwk_eep_sem_up(sc);
4076 			return (IWK_FAIL);
4077 		}
4078 
4079 		eep_p[addr/2] = rv >> 16;
4080 	}
4081 
4082 	iwk_eep_sem_up(sc);
4083 	return (IWK_SUCCESS);
4084 }
4085 
4086 /*
4087  * init mac address in ieee80211com_t struct
4088  */
4089 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
4090 {
4091 	ieee80211com_t *ic = &sc->sc_ic;
4092 	struct iwk_eep *ep = &sc->sc_eep_map;
4093 
4094 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
4095 
4096 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4097 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4098 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4099 }
4100 
4101 static int
4102 iwk_init(iwk_sc_t *sc)
4103 {
4104 	int qid, n, err;
4105 	clock_t clk;
4106 	uint32_t tmp;
4107 
4108 	mutex_enter(&sc->sc_glock);
4109 	sc->sc_flags &= ~IWK_F_FW_INIT;
4110 
4111 	(void) iwk_preinit(sc);
4112 
4113 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
4114 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
4115 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
4116 		goto fail1;
4117 	}
4118 
4119 	/* init Rx ring */
4120 	iwk_mac_access_enter(sc);
4121 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
4122 
4123 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
4124 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
4125 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
4126 
4127 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
4128 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
4129 	    offsetof(struct iwk_shared, val0)) >> 4));
4130 
4131 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
4132 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
4133 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
4134 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
4135 	    (RX_QUEUE_SIZE_LOG <<
4136 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
4137 	iwk_mac_access_exit(sc);
4138 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
4139 	    (RX_QUEUE_SIZE - 1) & ~0x7);
4140 
4141 	/* init Tx rings */
4142 	iwk_mac_access_enter(sc);
4143 	iwk_reg_write(sc, SCD_TXFACT, 0);
4144 
4145 	/* keep warm page */
4146 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
4147 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
4148 
4149 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
4150 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
4151 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
4152 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
4153 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4154 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
4155 	}
4156 	iwk_mac_access_exit(sc);
4157 
4158 	/* clear "radio off" and "disable command" bits */
4159 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4160 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
4161 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4162 
4163 	/* clear any pending interrupts */
4164 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
4165 
4166 	/* enable interrupts */
4167 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
4168 
4169 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4170 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4171 
4172 	/*
4173 	 * backup ucode data part for future use.
4174 	 */
4175 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4176 	    sc->sc_dma_fw_data.mem_va,
4177 	    sc->sc_dma_fw_data.alength);
4178 
4179 	for (n = 0; n < 2; n++) {
4180 		/* load firmware init segment into NIC */
4181 		err = iwk_load_firmware(sc);
4182 		if (err != IWK_SUCCESS) {
4183 			cmn_err(CE_WARN, "iwk_init(): "
4184 			    "failed to setup boot firmware\n");
4185 			continue;
4186 		}
4187 
4188 		/* now press "execute" start running */
4189 		IWK_WRITE(sc, CSR_RESET, 0);
4190 		break;
4191 	}
4192 	if (n == 2) {
4193 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
4194 		goto fail1;
4195 	}
4196 	/* ..and wait at most one second for adapter to initialize */
4197 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
4198 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
4199 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
4200 			break;
4201 	}
4202 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
4203 		cmn_err(CE_WARN,
4204 		    "iwk_init(): timeout waiting for firmware init\n");
4205 		goto fail1;
4206 	}
4207 
4208 	/*
4209 	 * at this point, the firmware is loaded OK, then config the hardware
4210 	 * with the ucode API, including rxon, txpower, etc.
4211 	 */
4212 	err = iwk_config(sc);
4213 	if (err) {
4214 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
4215 		goto fail1;
4216 	}
4217 
4218 	/* at this point, hardware may receive beacons :) */
4219 	mutex_exit(&sc->sc_glock);
4220 	return (IWK_SUCCESS);
4221 
4222 fail1:
4223 	err = IWK_FAIL;
4224 	mutex_exit(&sc->sc_glock);
4225 	return (err);
4226 }
4227 
4228 static void
4229 iwk_stop(iwk_sc_t *sc)
4230 {
4231 	uint32_t tmp;
4232 	int i;
4233 
4234 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4235 		mutex_enter(&sc->sc_glock);
4236 
4237 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4238 	/* disable interrupts */
4239 	IWK_WRITE(sc, CSR_INT_MASK, 0);
4240 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4241 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4242 
4243 	/* reset all Tx rings */
4244 	for (i = 0; i < IWK_NUM_QUEUES; i++)
4245 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
4246 
4247 	/* reset Rx ring */
4248 	iwk_reset_rx_ring(sc);
4249 
4250 	iwk_mac_access_enter(sc);
4251 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4252 	iwk_mac_access_exit(sc);
4253 
4254 	DELAY(5);
4255 
4256 	iwk_stop_master(sc);
4257 
4258 	sc->sc_tx_timer = 0;
4259 	sc->sc_flags &= ~IWK_F_SCANNING;
4260 	sc->sc_scan_pending = 0;
4261 
4262 	tmp = IWK_READ(sc, CSR_RESET);
4263 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4264 
4265 	if (!(sc->sc_flags & IWK_F_QUIESCED))
4266 		mutex_exit(&sc->sc_glock);
4267 }
4268 
4269 /*
4270  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4271  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4272  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4273  * INRIA Sophia - Projet Planete
4274  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4275  */
4276 #define	is_success(amrr)	\
4277 	((amrr)->retrycnt < (amrr)->txcnt / 10)
4278 #define	is_failure(amrr)	\
4279 	((amrr)->retrycnt > (amrr)->txcnt / 3)
4280 #define	is_enough(amrr)		\
4281 	((amrr)->txcnt > 100)
4282 #define	is_min_rate(in)		\
4283 	((in)->in_txrate == 0)
4284 #define	is_max_rate(in)		\
4285 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
4286 #define	increase_rate(in)	\
4287 	((in)->in_txrate++)
4288 #define	decrease_rate(in)	\
4289 	((in)->in_txrate--)
4290 #define	reset_cnt(amrr)		\
4291 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
4292 
4293 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
4294 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
4295 
4296 static void
4297 iwk_amrr_init(iwk_amrr_t *amrr)
4298 {
4299 	amrr->success = 0;
4300 	amrr->recovery = 0;
4301 	amrr->txcnt = amrr->retrycnt = 0;
4302 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4303 }
4304 
4305 static void
4306 iwk_amrr_timeout(iwk_sc_t *sc)
4307 {
4308 	ieee80211com_t *ic = &sc->sc_ic;
4309 
4310 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
4311 	if (ic->ic_opmode == IEEE80211_M_STA)
4312 		iwk_amrr_ratectl(NULL, ic->ic_bss);
4313 	else
4314 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
4315 	sc->sc_clk = ddi_get_lbolt();
4316 }
4317 
4318 /* ARGSUSED */
4319 static void
4320 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
4321 {
4322 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
4323 	int need_change = 0;
4324 
4325 	if (is_success(amrr) && is_enough(amrr)) {
4326 		amrr->success++;
4327 		if (amrr->success >= amrr->success_threshold &&
4328 		    !is_max_rate(in)) {
4329 			amrr->recovery = 1;
4330 			amrr->success = 0;
4331 			increase_rate(in);
4332 			IWK_DBG((IWK_DEBUG_RATECTL,
4333 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4334 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4335 			need_change = 1;
4336 		} else {
4337 			amrr->recovery = 0;
4338 		}
4339 	} else if (is_failure(amrr)) {
4340 		amrr->success = 0;
4341 		if (!is_min_rate(in)) {
4342 			if (amrr->recovery) {
4343 				amrr->success_threshold++;
4344 				if (amrr->success_threshold >
4345 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4346 					amrr->success_threshold =
4347 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4348 			} else {
4349 				amrr->success_threshold =
4350 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4351 			}
4352 			decrease_rate(in);
4353 			IWK_DBG((IWK_DEBUG_RATECTL,
4354 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4355 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4356 			need_change = 1;
4357 		}
4358 		amrr->recovery = 0;	/* paper is incorrect */
4359 	}
4360 
4361 	if (is_enough(amrr) || need_change)
4362 		reset_cnt(amrr);
4363 }
4364 
4365 /*
4366  * calculate 4965 chipset's kelvin temperature according to
4367  * the data of init alive and satistics notification.
4368  * The details is described in iwk_calibration.h file
4369  */
4370 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4371 {
4372 	int32_t  tempera;
4373 	int32_t  r1, r2, r3;
4374 	uint32_t  r4_u;
4375 	int32_t   r4_s;
4376 
4377 	if (iwk_is_fat_channel(sc)) {
4378 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4379 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4380 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4381 		r4_u = sc->sc_card_alive_init.therm_r4[1];
4382 	} else {
4383 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4384 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4385 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4386 		r4_u = sc->sc_card_alive_init.therm_r4[0];
4387 	}
4388 
4389 	if (sc->sc_flags & IWK_F_STATISTICS) {
4390 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4391 		    (31-23)) >> (31-23);
4392 	} else {
4393 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4394 	}
4395 
4396 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4397 	    r1, r2, r3, r4_s));
4398 
4399 	if (r3 == r1) {
4400 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
4401 		    "failed to calculate temperature"
4402 		    "because r3 = r1\n");
4403 		return (DDI_FAILURE);
4404 	}
4405 
4406 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4407 	tempera /= (r3 - r1);
4408 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4409 
4410 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4411 	    tempera, KELVIN_TO_CELSIUS(tempera)));
4412 
4413 	return (tempera);
4414 }
4415 
4416 /* Determine whether 4965 is using 2.4 GHz band */
4417 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4418 {
4419 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4420 }
4421 
4422 /* Determine whether 4965 is using fat channel */
4423 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4424 {
4425 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4426 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4427 }
4428 
4429 /*
4430  * In MIMO mode, determine which group 4965's current channel belong to.
4431  * For more infomation about "channel group",
4432  * please refer to iwk_calibration.h file
4433  */
4434 static int iwk_txpower_grp(uint16_t channel)
4435 {
4436 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4437 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4438 		return (CALIB_CH_GROUP_5);
4439 	}
4440 
4441 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4442 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4443 		return (CALIB_CH_GROUP_1);
4444 	}
4445 
4446 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4447 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4448 		return (CALIB_CH_GROUP_2);
4449 	}
4450 
4451 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4452 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4453 		return (CALIB_CH_GROUP_3);
4454 	}
4455 
4456 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4457 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4458 		return (CALIB_CH_GROUP_4);
4459 	}
4460 
4461 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
4462 	    "can't find txpower group for channel %d.\n", channel);
4463 
4464 	return (DDI_FAILURE);
4465 }
4466 
4467 /* 2.4 GHz */
4468 static uint16_t iwk_eep_band_1[14] = {
4469 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4470 };
4471 
4472 /* 5.2 GHz bands */
4473 static uint16_t iwk_eep_band_2[13] = {
4474 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4475 };
4476 
4477 static uint16_t iwk_eep_band_3[12] = {
4478 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4479 };
4480 
4481 static uint16_t iwk_eep_band_4[11] = {
4482 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4483 };
4484 
4485 static uint16_t iwk_eep_band_5[6] = {
4486 	145, 149, 153, 157, 161, 165
4487 };
4488 
4489 static uint16_t iwk_eep_band_6[7] = {
4490 	1, 2, 3, 4, 5, 6, 7
4491 };
4492 
4493 static uint16_t iwk_eep_band_7[11] = {
4494 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4495 };
4496 
4497 /* Get regulatory data from eeprom for a given channel */
4498 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4499     uint16_t channel,
4500     int is_24G, int is_fat, int is_hi_chan)
4501 {
4502 	int32_t i;
4503 	uint16_t chan;
4504 
4505 	if (is_fat) {  /* 11n mode */
4506 
4507 		if (is_hi_chan) {
4508 			chan = channel - 4;
4509 		} else {
4510 			chan = channel;
4511 		}
4512 
4513 		for (i = 0; i < 7; i++) {
4514 			if (iwk_eep_band_6[i] == chan) {
4515 				return (&sc->sc_eep_map.band_24_channels[i]);
4516 			}
4517 		}
4518 		for (i = 0; i < 11; i++) {
4519 			if (iwk_eep_band_7[i] == chan) {
4520 				return (&sc->sc_eep_map.band_52_channels[i]);
4521 			}
4522 		}
4523 	} else if (is_24G) {  /* 2.4 GHz band */
4524 		for (i = 0; i < 14; i++) {
4525 			if (iwk_eep_band_1[i] == channel) {
4526 				return (&sc->sc_eep_map.band_1_channels[i]);
4527 			}
4528 		}
4529 	} else {  /* 5 GHz band */
4530 		for (i = 0; i < 13; i++) {
4531 			if (iwk_eep_band_2[i] == channel) {
4532 				return (&sc->sc_eep_map.band_2_channels[i]);
4533 			}
4534 		}
4535 		for (i = 0; i < 12; i++) {
4536 			if (iwk_eep_band_3[i] == channel) {
4537 				return (&sc->sc_eep_map.band_3_channels[i]);
4538 			}
4539 		}
4540 		for (i = 0; i < 11; i++) {
4541 			if (iwk_eep_band_4[i] == channel) {
4542 				return (&sc->sc_eep_map.band_4_channels[i]);
4543 			}
4544 		}
4545 		for (i = 0; i < 6; i++) {
4546 			if (iwk_eep_band_5[i] == channel) {
4547 				return (&sc->sc_eep_map.band_5_channels[i]);
4548 			}
4549 		}
4550 	}
4551 
4552 	return (NULL);
4553 }
4554 
4555 /*
4556  * Determine which subband a given channel belongs
4557  * to in 2.4 GHz or 5 GHz band
4558  */
4559 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4560 {
4561 	int32_t b_n = -1;
4562 
4563 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4564 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4565 			continue;
4566 		}
4567 
4568 		if ((channel >=
4569 		    (uint16_t)sc->sc_eep_map.calib_info.
4570 		    band_info_tbl[b_n].ch_from) &&
4571 		    (channel <=
4572 		    (uint16_t)sc->sc_eep_map.calib_info.
4573 		    band_info_tbl[b_n].ch_to)) {
4574 			break;
4575 		}
4576 	}
4577 
4578 	return (b_n);
4579 }
4580 
4581 /* Make a special division for interpolation operation */
4582 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4583 {
4584 	int32_t sign = 1;
4585 
4586 	if (num < 0) {
4587 		sign = -sign;
4588 		num = -num;
4589 	}
4590 
4591 	if (denom < 0) {
4592 		sign = -sign;
4593 		denom = -denom;
4594 	}
4595 
4596 	*res = ((num*2 + denom) / (denom*2)) * sign;
4597 
4598 	return (IWK_SUCCESS);
4599 }
4600 
4601 /* Make interpolation operation */
4602 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4603     int32_t x2, int32_t y2)
4604 {
4605 	int32_t val;
4606 
4607 	if (x2 == x1) {
4608 		return (y1);
4609 	} else {
4610 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4611 		return (val + y2);
4612 	}
4613 }
4614 
4615 /* Get interpolation measurement data of a given channel for all chains. */
4616 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4617     struct iwk_eep_calib_channel_info *chan_info)
4618 {
4619 	int32_t ban_n;
4620 	uint32_t ch1_n, ch2_n;
4621 	int32_t c, m;
4622 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4623 
4624 	/* determine subband number */
4625 	ban_n = iwk_band_number(sc, channel);
4626 	if (ban_n >= EEP_TX_POWER_BANDS) {
4627 		return (DDI_FAILURE);
4628 	}
4629 
4630 	ch1_n =
4631 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4632 	ch2_n =
4633 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4634 
4635 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4636 
4637 	/*
4638 	 * go through all chains on chipset
4639 	 */
4640 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4641 		/*
4642 		 * go through all factory measurements
4643 		 */
4644 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4645 			m1_p =
4646 			    &(sc->sc_eep_map.calib_info.
4647 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4648 			m2_p =
4649 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4650 			    ch2.measure[c][m]);
4651 			m_p = &(chan_info->measure[c][m]);
4652 
4653 			/*
4654 			 * make interpolation to get actual
4655 			 * Tx power for given channel
4656 			 */
4657 			m_p->actual_pow = iwk_interpolate_value(channel,
4658 			    ch1_n, m1_p->actual_pow,
4659 			    ch2_n, m2_p->actual_pow);
4660 
4661 			/* make interpolation to get index into gain table */
4662 			m_p->gain_idx = iwk_interpolate_value(channel,
4663 			    ch1_n, m1_p->gain_idx,
4664 			    ch2_n, m2_p->gain_idx);
4665 
4666 			/* make interpolation to get chipset temperature */
4667 			m_p->temperature = iwk_interpolate_value(channel,
4668 			    ch1_n, m1_p->temperature,
4669 			    ch2_n, m2_p->temperature);
4670 
4671 			/*
4672 			 * make interpolation to get power
4673 			 * amp detector level
4674 			 */
4675 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4676 			    m1_p->pa_det,
4677 			    ch2_n, m2_p->pa_det);
4678 		}
4679 	}
4680 
4681 	return (IWK_SUCCESS);
4682 }
4683 
4684 /*
4685  * Calculate voltage compensation for Tx power. For more infomation,
4686  * please refer to iwk_calibration.h file
4687  */
4688 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4689     int32_t curr_voltage)
4690 {
4691 	int32_t vol_comp = 0;
4692 
4693 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4694 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4695 		return (vol_comp);
4696 	}
4697 
4698 	(void) iwk_division(curr_voltage-eep_voltage,
4699 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4700 
4701 	if (curr_voltage > eep_voltage) {
4702 		vol_comp *= 2;
4703 	}
4704 	if ((vol_comp < -2) || (vol_comp > 2)) {
4705 		vol_comp = 0;
4706 	}
4707 
4708 	return (vol_comp);
4709 }
4710 
4711 /*
4712  * Thermal compensation values for txpower for various frequency ranges ...
4713  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4714  */
4715 static struct iwk_txpower_tempera_comp {
4716 	int32_t degrees_per_05db_a;
4717 	int32_t degrees_per_05db_a_denom;
4718 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4719 	{9, 2},			/* group 0 5.2, ch  34-43 */
4720 	{4, 1},			/* group 1 5.2, ch  44-70 */
4721 	{4, 1},			/* group 2 5.2, ch  71-124 */
4722 	{4, 1},			/* group 3 5.2, ch 125-200 */
4723 	{3, 1}			/* group 4 2.4, ch   all */
4724 };
4725 
4726 /*
4727  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4728  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4729  */
4730 static int32_t back_off_table[] = {
4731 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4732 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4733 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4734 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4735 	10			/* CCK */
4736 };
4737 
4738 /* determine minimum Tx power index in gain table */
4739 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4740 {
4741 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4742 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4743 	}
4744 
4745 	return (MIN_TX_GAIN_INDEX);
4746 }
4747 
4748 /*
4749  * Determine DSP and radio gain according to temperature and other factors.
4750  * This function is the majority of Tx power calibration
4751  */
4752 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4753     struct iwk_tx_power_db *tp_db)
4754 {
4755 	int is_24G, is_fat, is_high_chan, is_mimo;
4756 	int c, r;
4757 	int32_t target_power;
4758 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4759 	uint16_t channel;
4760 	uint8_t saturation_power;
4761 	int32_t regu_power;
4762 	int32_t curr_regu_power;
4763 	struct iwk_eep_channel *eep_chan_p;
4764 	struct iwk_eep_calib_channel_info eep_chan_calib;
4765 	int32_t eep_voltage, init_voltage;
4766 	int32_t voltage_compensation;
4767 	int32_t temperature;
4768 	int32_t degrees_per_05db_num;
4769 	int32_t degrees_per_05db_denom;
4770 	struct iwk_eep_calib_measure *measure_p;
4771 	int32_t interpo_temp;
4772 	int32_t power_limit;
4773 	int32_t atten_value;
4774 	int32_t tempera_comp[2];
4775 	int32_t interpo_gain_idx[2];
4776 	int32_t interpo_actual_pow[2];
4777 	union iwk_tx_power_dual_stream txpower_gains;
4778 	int32_t txpower_gains_idx;
4779 
4780 	channel = sc->sc_config.chan;
4781 
4782 	/* 2.4 GHz or 5 GHz band */
4783 	is_24G = iwk_is_24G_band(sc);
4784 
4785 	/* fat channel or not */
4786 	is_fat = iwk_is_fat_channel(sc);
4787 
4788 	/*
4789 	 * using low half channel number or high half channel number
4790 	 * identify fat channel
4791 	 */
4792 	if (is_fat && (sc->sc_config.flags &
4793 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4794 		is_high_chan = 1;
4795 	}
4796 
4797 	if ((channel > 0) && (channel < 200)) {
4798 		/* get regulatory channel data from eeprom */
4799 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4800 		    is_fat, is_high_chan);
4801 		if (NULL == eep_chan_p) {
4802 			cmn_err(CE_WARN,
4803 			    "iwk_txpower_table_cmd_init(): "
4804 			    "can't get channel infomation\n");
4805 			return (DDI_FAILURE);
4806 		}
4807 	} else {
4808 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4809 		    "channel(%d) isn't in proper range\n",
4810 		    channel);
4811 		return (DDI_FAILURE);
4812 	}
4813 
4814 	/* initial value of Tx power */
4815 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4816 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4817 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4818 		    "user TX power is too weak\n");
4819 		return (DDI_FAILURE);
4820 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4821 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4822 		    "user TX power is too strong\n");
4823 		return (DDI_FAILURE);
4824 	}
4825 
4826 	target_power = 2 * sc->sc_user_txpower;
4827 
4828 	/* determine which group current channel belongs to */
4829 	tx_grp = iwk_txpower_grp(channel);
4830 	if (tx_grp < 0) {
4831 		return (tx_grp);
4832 	}
4833 
4834 
4835 	if (is_fat) {
4836 		if (is_high_chan) {
4837 			channel -= 2;
4838 		} else {
4839 			channel += 2;
4840 		}
4841 	}
4842 
4843 	/* determine saturation power */
4844 	if (is_24G) {
4845 		saturation_power =
4846 		    sc->sc_eep_map.calib_info.saturation_power24;
4847 	} else {
4848 		saturation_power =
4849 		    sc->sc_eep_map.calib_info.saturation_power52;
4850 	}
4851 
4852 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4853 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4854 		if (is_24G) {
4855 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4856 		} else {
4857 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4858 		}
4859 	}
4860 
4861 	/* determine regulatory power */
4862 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4863 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4864 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4865 		if (is_24G) {
4866 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4867 		} else {
4868 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4869 		}
4870 	}
4871 
4872 	/*
4873 	 * get measurement data for current channel
4874 	 * suach as temperature,index to gain table,actual Tx power
4875 	 */
4876 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4877 
4878 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4879 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4880 
4881 	/* calculate voltage compensation to Tx power */
4882 	voltage_compensation =
4883 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4884 
4885 	if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4886 		temperature = sc->sc_tempera;
4887 	} else {
4888 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4889 	}
4890 	if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4891 		temperature = sc->sc_tempera;
4892 	} else {
4893 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4894 	}
4895 	temperature = KELVIN_TO_CELSIUS(temperature);
4896 
4897 	degrees_per_05db_num =
4898 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4899 	degrees_per_05db_denom =
4900 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4901 
4902 	for (c = 0; c < 2; c++) {  /* go through all chains */
4903 		measure_p = &eep_chan_calib.measure[c][1];
4904 		interpo_temp = measure_p->temperature;
4905 
4906 		/* determine temperature compensation to Tx power */
4907 		(void) iwk_division(
4908 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4909 		    degrees_per_05db_num, &tempera_comp[c]);
4910 
4911 		interpo_gain_idx[c] = measure_p->gain_idx;
4912 		interpo_actual_pow[c] = measure_p->actual_pow;
4913 	}
4914 
4915 	/*
4916 	 * go through all rate entries in Tx power table
4917 	 */
4918 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4919 		if (r & 0x8) {
4920 			/* need to lower regulatory power for MIMO mode */
4921 			curr_regu_power = regu_power -
4922 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4923 			is_mimo = 1;
4924 		} else {
4925 			curr_regu_power = regu_power;
4926 			is_mimo = 0;
4927 		}
4928 
4929 		power_limit = saturation_power - back_off_table[r];
4930 		if (power_limit > curr_regu_power) {
4931 			/* final Tx power limit */
4932 			power_limit = curr_regu_power;
4933 		}
4934 
4935 		if (target_power > power_limit) {
4936 			target_power = power_limit; /* final target Tx power */
4937 		}
4938 
4939 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4940 			if (is_mimo) {
4941 				atten_value =
4942 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4943 			} else {
4944 				atten_value = 0;
4945 			}
4946 
4947 			/*
4948 			 * calculate index in gain table
4949 			 * this step is very important
4950 			 */
4951 			txpower_gains_idx = interpo_gain_idx[c] -
4952 			    (target_power - interpo_actual_pow[c]) -
4953 			    tempera_comp[c] - voltage_compensation +
4954 			    atten_value;
4955 
4956 			if (txpower_gains_idx <
4957 			    iwk_min_power_index(r, is_24G)) {
4958 				txpower_gains_idx =
4959 				    iwk_min_power_index(r, is_24G);
4960 			}
4961 
4962 			if (!is_24G) {
4963 				/*
4964 				 * support negative index for 5 GHz
4965 				 * band
4966 				 */
4967 				txpower_gains_idx += 9;
4968 			}
4969 
4970 			if (POWER_TABLE_CCK_ENTRY == r) {
4971 				/* for CCK mode, make necessary attenuaton */
4972 				txpower_gains_idx +=
4973 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4974 			}
4975 
4976 			if (txpower_gains_idx > 107) {
4977 				txpower_gains_idx = 107;
4978 			} else if (txpower_gains_idx < 0) {
4979 				txpower_gains_idx = 0;
4980 			}
4981 
4982 			/* search DSP and radio gains in gain table */
4983 			txpower_gains.s.radio_tx_gain[c] =
4984 			    gains_table[is_24G][txpower_gains_idx].radio;
4985 			txpower_gains.s.dsp_predis_atten[c] =
4986 			    gains_table[is_24G][txpower_gains_idx].dsp;
4987 
4988 			IWK_DBG((IWK_DEBUG_CALIBRATION,
4989 			    "rate_index: %d, "
4990 			    "gain_index %d, c: %d,is_mimo: %d\n",
4991 			    r, txpower_gains_idx, c, is_mimo));
4992 		}
4993 
4994 		/* initialize Tx power table */
4995 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4996 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4997 		} else {
4998 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
4999 		}
5000 	}
5001 
5002 	return (IWK_SUCCESS);
5003 }
5004 
5005 /*
5006  * make Tx power calibration to adjust Tx power.
5007  * This is completed by sending out Tx power table command.
5008  */
5009 static int iwk_tx_power_calibration(iwk_sc_t *sc)
5010 {
5011 	iwk_tx_power_table_cmd_t cmd;
5012 	int rv;
5013 
5014 	if (sc->sc_flags & IWK_F_SCANNING) {
5015 		return (IWK_SUCCESS);
5016 	}
5017 
5018 	/* necessary initialization to Tx power table command */
5019 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
5020 	cmd.channel = sc->sc_config.chan;
5021 	cmd.channel_normal_width = 0;
5022 
5023 	/* initialize Tx power table */
5024 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
5025 	if (rv) {
5026 		cmn_err(CE_NOTE, "rv= %d\n", rv);
5027 		return (rv);
5028 	}
5029 
5030 	/* send out Tx power table command */
5031 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
5032 	if (rv) {
5033 		return (rv);
5034 	}
5035 
5036 	/* record current temperature */
5037 	sc->sc_last_tempera = sc->sc_tempera;
5038 
5039 	return (IWK_SUCCESS);
5040 }
5041 
5042 /* This function is the handler of statistics notification from uCode */
5043 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
5044 {
5045 	int is_diff;
5046 	struct iwk_notif_statistics *statistics_p =
5047 	    (struct iwk_notif_statistics *)(desc + 1);
5048 
5049 	mutex_enter(&sc->sc_glock);
5050 
5051 	is_diff = (sc->sc_statistics.general.temperature !=
5052 	    statistics_p->general.temperature) ||
5053 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
5054 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
5055 
5056 	/* update statistics data */
5057 	(void) memcpy(&sc->sc_statistics, statistics_p,
5058 	    sizeof (struct iwk_notif_statistics));
5059 
5060 	sc->sc_flags |= IWK_F_STATISTICS;
5061 
5062 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
5063 		/* make Receiver gain balance calibration */
5064 		(void) iwk_rxgain_diff(sc);
5065 
5066 		/* make Receiver sensitivity calibration */
5067 		(void) iwk_rx_sens(sc);
5068 	}
5069 
5070 
5071 	if (!is_diff) {
5072 		mutex_exit(&sc->sc_glock);
5073 		return;
5074 	}
5075 
5076 	/* calibration current temperature of 4965 chipset */
5077 	sc->sc_tempera = iwk_curr_tempera(sc);
5078 
5079 	/* distinct temperature change will trigger Tx power calibration */
5080 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
5081 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
5082 		/* make Tx power calibration */
5083 		(void) iwk_tx_power_calibration(sc);
5084 	}
5085 
5086 	mutex_exit(&sc->sc_glock);
5087 }
5088 
5089 /* Determine this station is in associated state or not */
5090 static int iwk_is_associated(iwk_sc_t *sc)
5091 {
5092 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
5093 }
5094 
5095 /* Make necessary preparation for Receiver gain balance calibration */
5096 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
5097 {
5098 	int i, rv;
5099 	struct iwk_calibration_cmd cmd;
5100 	struct iwk_rx_gain_diff *gain_diff_p;
5101 
5102 	gain_diff_p = &sc->sc_rxgain_diff;
5103 
5104 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
5105 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
5106 
5107 	for (i = 0; i < RX_CHAINS_NUM; i++) {
5108 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
5109 	}
5110 
5111 	if (iwk_is_associated(sc)) {
5112 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5113 		cmd.diff_gain_a = 0;
5114 		cmd.diff_gain_b = 0;
5115 		cmd.diff_gain_c = 0;
5116 
5117 		/* assume the gains of every Rx chains is balanceable */
5118 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
5119 		    sizeof (cmd), 1);
5120 		if (rv) {
5121 			return (rv);
5122 		}
5123 
5124 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
5125 	}
5126 
5127 	return (IWK_SUCCESS);
5128 }
5129 
5130 /*
5131  * make Receiver gain balance to balance Rx gain between Rx chains
5132  * and determine which chain is disconnected
5133  */
5134 static int iwk_rxgain_diff(iwk_sc_t *sc)
5135 {
5136 	int i, is_24G, rv;
5137 	int max_beacon_chain_n;
5138 	int min_noise_chain_n;
5139 	uint16_t channel_n;
5140 	int32_t beacon_diff;
5141 	int32_t noise_diff;
5142 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
5143 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
5144 	struct iwk_calibration_cmd cmd;
5145 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5146 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5147 	struct statistics_rx_non_phy *rx_general_p =
5148 	    &sc->sc_statistics.rx.general;
5149 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
5150 
5151 	if (INTERFERENCE_DATA_AVAILABLE !=
5152 	    rx_general_p->interference_data_flag) {
5153 		return (IWK_SUCCESS);
5154 	}
5155 
5156 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
5157 		return (IWK_SUCCESS);
5158 	}
5159 
5160 	is_24G = iwk_is_24G_band(sc);
5161 	channel_n = sc->sc_config.chan;	 /* channel number */
5162 
5163 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
5164 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
5165 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
5166 	    !is_24G)) {
5167 		return (IWK_SUCCESS);
5168 	}
5169 
5170 	/* Rx chain's noise strength from statistics notification */
5171 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
5172 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
5173 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
5174 
5175 	/* Rx chain's beacon strength from statistics notification */
5176 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
5177 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
5178 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
5179 
5180 	gain_diff_p->beacon_count++;
5181 
5182 	/* accumulate chain's noise strength */
5183 	gain_diff_p->noise_stren_a += noise_chain_a;
5184 	gain_diff_p->noise_stren_b += noise_chain_b;
5185 	gain_diff_p->noise_stren_c += noise_chain_c;
5186 
5187 	/* accumulate chain's beacon strength */
5188 	gain_diff_p->beacon_stren_a += beacon_chain_a;
5189 	gain_diff_p->beacon_stren_b += beacon_chain_b;
5190 	gain_diff_p->beacon_stren_c += beacon_chain_c;
5191 
5192 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
5193 		/* calculate average beacon strength */
5194 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
5195 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
5196 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
5197 
5198 		/* calculate average noise strength */
5199 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
5200 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5201 		noise_aver[2] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
5202 
5203 		/* determine maximum beacon strength among 3 chains */
5204 		if ((beacon_aver[0] >= beacon_aver[1]) &&
5205 		    (beacon_aver[0] >= beacon_aver[2])) {
5206 			max_beacon_chain_n = 0;
5207 			gain_diff_p->connected_chains = 1 << 0;
5208 		} else if (beacon_aver[1] >= beacon_aver[2]) {
5209 			max_beacon_chain_n = 1;
5210 			gain_diff_p->connected_chains = 1 << 1;
5211 		} else {
5212 			max_beacon_chain_n = 2;
5213 			gain_diff_p->connected_chains = 1 << 2;
5214 		}
5215 
5216 		/* determine which chain is disconnected */
5217 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5218 			if (i != max_beacon_chain_n) {
5219 				beacon_diff = beacon_aver[max_beacon_chain_n] -
5220 				    beacon_aver[i];
5221 				if (beacon_diff > MAX_ALLOWED_DIFF) {
5222 					gain_diff_p->disconnect_chain[i] = 1;
5223 				} else {
5224 					gain_diff_p->connected_chains |=
5225 					    (1 << i);
5226 				}
5227 			}
5228 		}
5229 
5230 		/*
5231 		 * if chain A and B are both disconnected,
5232 		 * assume the stronger in beacon strength is connected
5233 		 */
5234 		if (gain_diff_p->disconnect_chain[0] &&
5235 		    gain_diff_p->disconnect_chain[1]) {
5236 			if (beacon_aver[0] >= beacon_aver[1]) {
5237 				gain_diff_p->disconnect_chain[0] = 0;
5238 				gain_diff_p->connected_chains |= (1 << 0);
5239 			} else {
5240 				gain_diff_p->disconnect_chain[1] = 0;
5241 				gain_diff_p->connected_chains |= (1 << 1);
5242 			}
5243 		}
5244 
5245 		/* determine minimum noise strength among 3 chains */
5246 		if (!gain_diff_p->disconnect_chain[0]) {
5247 			min_noise_chain_n = 0;
5248 
5249 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5250 				if (!gain_diff_p->disconnect_chain[i] &&
5251 				    (noise_aver[i] <=
5252 				    noise_aver[min_noise_chain_n])) {
5253 					min_noise_chain_n = i;
5254 				}
5255 
5256 			}
5257 		} else {
5258 			min_noise_chain_n = 1;
5259 
5260 			for (i = 0; i < RX_CHAINS_NUM; i++) {
5261 				if (!gain_diff_p->disconnect_chain[i] &&
5262 				    (noise_aver[i] <=
5263 				    noise_aver[min_noise_chain_n])) {
5264 					min_noise_chain_n = i;
5265 				}
5266 			}
5267 		}
5268 
5269 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
5270 
5271 		/* determine gain difference between chains */
5272 		for (i = 0; i < RX_CHAINS_NUM; i++) {
5273 			if (!gain_diff_p->disconnect_chain[i] &&
5274 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
5275 			    gain_diff_p->gain_diff_chain[i])) {
5276 
5277 				noise_diff = noise_aver[i] -
5278 				    noise_aver[min_noise_chain_n];
5279 				gain_diff_p->gain_diff_chain[i] =
5280 				    (uint8_t)((noise_diff * 10) / 15);
5281 
5282 				if (gain_diff_p->gain_diff_chain[i] > 3) {
5283 					gain_diff_p->gain_diff_chain[i] = 3;
5284 				}
5285 
5286 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
5287 			} else {
5288 				gain_diff_p->gain_diff_chain[i] = 0;
5289 			}
5290 		}
5291 
5292 		if (!gain_diff_p->gain_diff_send) {
5293 			gain_diff_p->gain_diff_send = 1;
5294 
5295 			(void) memset(&cmd, 0, sizeof (cmd));
5296 
5297 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5298 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
5299 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
5300 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
5301 
5302 			/*
5303 			 * send out PHY calibration command to
5304 			 * adjust every chain's Rx gain
5305 			 */
5306 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5307 			    &cmd, sizeof (cmd), 1);
5308 			if (rv) {
5309 				return (rv);
5310 			}
5311 
5312 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
5313 		}
5314 
5315 		gain_diff_p->beacon_stren_a = 0;
5316 		gain_diff_p->beacon_stren_b = 0;
5317 		gain_diff_p->beacon_stren_c = 0;
5318 
5319 		gain_diff_p->noise_stren_a = 0;
5320 		gain_diff_p->noise_stren_b = 0;
5321 		gain_diff_p->noise_stren_c = 0;
5322 	}
5323 
5324 	return (IWK_SUCCESS);
5325 }
5326 
5327 /* Make necessary preparation for Receiver sensitivity calibration */
5328 static int iwk_rx_sens_init(iwk_sc_t *sc)
5329 {
5330 	int i, rv;
5331 	struct iwk_rx_sensitivity_cmd cmd;
5332 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5333 
5334 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5335 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5336 
5337 	rx_sens_p->auto_corr_ofdm_x4 = 90;
5338 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5339 	rx_sens_p->auto_corr_ofdm_x1 = 105;
5340 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5341 
5342 	rx_sens_p->auto_corr_cck_x4 = 125;
5343 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5344 	rx_sens_p->min_energy_det_cck = 100;
5345 
5346 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5347 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5348 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5349 
5350 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5351 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5352 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
5353 	rx_sens_p->last_false_alarm_cnt_cck = 0;
5354 
5355 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5356 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5357 	rx_sens_p->cck_no_false_alarm_num = 0;
5358 	rx_sens_p->cck_beacon_idx = 0;
5359 
5360 	for (i = 0; i < 10; i++) {
5361 		rx_sens_p->cck_beacon_min[i] = 0;
5362 	}
5363 
5364 	rx_sens_p->cck_noise_idx = 0;
5365 	rx_sens_p->cck_noise_ref = 0;
5366 
5367 	for (i = 0; i < 20; i++) {
5368 		rx_sens_p->cck_noise_max[i] = 0;
5369 	}
5370 
5371 	rx_sens_p->cck_noise_diff = 0;
5372 	rx_sens_p->cck_no_false_alarm_num = 0;
5373 
5374 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5375 
5376 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5377 	    rx_sens_p->auto_corr_ofdm_x4;
5378 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5379 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5380 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5381 	    rx_sens_p->auto_corr_ofdm_x1;
5382 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5383 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5384 
5385 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5386 	    rx_sens_p->auto_corr_cck_x4;
5387 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5388 	    rx_sens_p->auto_corr_mrc_cck_x4;
5389 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
5390 
5391 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5392 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5393 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5394 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5395 
5396 	/* at first, set up Rx to maximum sensitivity */
5397 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5398 	if (rv) {
5399 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5400 		    "in the process of initialization, "
5401 		    "failed to send rx sensitivity command\n");
5402 		return (rv);
5403 	}
5404 
5405 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5406 
5407 	return (IWK_SUCCESS);
5408 }
5409 
5410 /*
5411  * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity.
5412  * for more infomation, please refer to iwk_calibration.h file
5413  */
5414 static int iwk_rx_sens(iwk_sc_t *sc)
5415 {
5416 	int rv;
5417 	uint32_t actual_rx_time;
5418 	struct statistics_rx_non_phy *rx_general_p =
5419 	    &sc->sc_statistics.rx.general;
5420 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5421 	struct iwk_rx_sensitivity_cmd cmd;
5422 
5423 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5424 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5425 		    "sensitivity initialization has not finished.\n");
5426 		return (DDI_FAILURE);
5427 	}
5428 
5429 	if (INTERFERENCE_DATA_AVAILABLE !=
5430 	    rx_general_p->interference_data_flag) {
5431 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5432 		    "can't make rx sensitivity calibration,"
5433 		    "because of invalid statistics\n");
5434 		return (DDI_FAILURE);
5435 	}
5436 
5437 	actual_rx_time = rx_general_p->channel_load;
5438 	if (!actual_rx_time) {
5439 		IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5440 		    "can't make rx sensitivity calibration,"
5441 		    "because has not enough rx time\n"));
5442 		return (DDI_FAILURE);
5443 	}
5444 
5445 	/* make Rx sensitivity calibration for OFDM mode */
5446 	rv = iwk_ofdm_sens(sc, actual_rx_time);
5447 	if (rv) {
5448 		return (rv);
5449 	}
5450 
5451 	/* make Rx sensitivity calibration for CCK mode */
5452 	rv = iwk_cck_sens(sc, actual_rx_time);
5453 	if (rv) {
5454 		return (rv);
5455 	}
5456 
5457 	/*
5458 	 * if the sum of false alarm had not changed, nothing will be done
5459 	 */
5460 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5461 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5462 		return (IWK_SUCCESS);
5463 	}
5464 
5465 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5466 
5467 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5468 	    rx_sens_p->auto_corr_ofdm_x4;
5469 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5470 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5471 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5472 	    rx_sens_p->auto_corr_ofdm_x1;
5473 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5474 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5475 
5476 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5477 	    rx_sens_p->auto_corr_cck_x4;
5478 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5479 	    rx_sens_p->auto_corr_mrc_cck_x4;
5480 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5481 	    rx_sens_p->min_energy_det_cck;
5482 
5483 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5484 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5485 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5486 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5487 
5488 	/*
5489 	 * send sensitivity command to complete actual sensitivity calibration
5490 	 */
5491 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5492 	if (rv) {
5493 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5494 		    "fail to send rx sensitivity command\n");
5495 		return (rv);
5496 	}
5497 
5498 	return (IWK_SUCCESS);
5499 
5500 }
5501 
5502 /*
5503  * make Rx sensitivity calibration for CCK mode.
5504  * This is preparing parameters for Sensitivity command
5505  */
5506 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5507 {
5508 	int i;
5509 	uint8_t noise_a, noise_b, noise_c;
5510 	uint8_t max_noise_abc, max_noise_20;
5511 	uint32_t beacon_a, beacon_b, beacon_c;
5512 	uint32_t min_beacon_abc, max_beacon_10;
5513 	uint32_t cck_fa, cck_bp;
5514 	uint32_t cck_sum_fa_bp;
5515 	uint32_t temp;
5516 	struct statistics_rx_non_phy *rx_general_p =
5517 	    &sc->sc_statistics.rx.general;
5518 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5519 
5520 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5521 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5522 
5523 	/* accumulate false alarm */
5524 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5525 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5526 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5527 		cck_fa += (0xFFFFFFFF - temp);
5528 	} else {
5529 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5530 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5531 	}
5532 
5533 	/* accumulate bad plcp */
5534 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5535 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5536 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5537 		cck_bp += (0xFFFFFFFF - temp);
5538 	} else {
5539 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5540 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5541 	}
5542 
5543 	/*
5544 	 * calculate relative value
5545 	 */
5546 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5547 	rx_sens_p->cck_noise_diff = 0;
5548 
5549 	noise_a =
5550 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5551 	noise_b =
5552 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5553 	noise_c =
5554 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5555 
5556 	beacon_a = rx_general_p->beacon_energy_a;
5557 	beacon_b = rx_general_p->beacon_energy_b;
5558 	beacon_c = rx_general_p->beacon_energy_c;
5559 
5560 	/* determine maximum noise among 3 chains */
5561 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5562 		max_noise_abc = noise_a;
5563 	} else if (noise_b >= noise_c) {
5564 		max_noise_abc = noise_b;
5565 	} else {
5566 		max_noise_abc = noise_c;
5567 	}
5568 
5569 	/* record maximum noise among 3 chains */
5570 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5571 	rx_sens_p->cck_noise_idx++;
5572 	if (rx_sens_p->cck_noise_idx >= 20) {
5573 		rx_sens_p->cck_noise_idx = 0;
5574 	}
5575 
5576 	/* determine maximum noise among 20 max noise */
5577 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5578 	for (i = 0; i < 20; i++) {
5579 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5580 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5581 		}
5582 	}
5583 
5584 	/* determine minimum beacon among 3 chains */
5585 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5586 		min_beacon_abc = beacon_a;
5587 	} else if (beacon_b <= beacon_c) {
5588 		min_beacon_abc = beacon_b;
5589 	} else {
5590 		min_beacon_abc = beacon_c;
5591 	}
5592 
5593 	/* record miminum beacon among 3 chains */
5594 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5595 	rx_sens_p->cck_beacon_idx++;
5596 	if (rx_sens_p->cck_beacon_idx >= 10) {
5597 		rx_sens_p->cck_beacon_idx = 0;
5598 	}
5599 
5600 	/* determine maximum beacon among 10 miminum beacon among 3 chains */
5601 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5602 	for (i = 0; i < 10; i++) {
5603 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5604 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5605 		}
5606 	}
5607 
5608 	/* add a little margin */
5609 	max_beacon_10 += 6;
5610 
5611 	/* record the count of having no false alarms */
5612 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5613 		rx_sens_p->cck_no_false_alarm_num++;
5614 	} else {
5615 		rx_sens_p->cck_no_false_alarm_num = 0;
5616 	}
5617 
5618 	/*
5619 	 * adjust parameters in sensitivity command
5620 	 * according to different status.
5621 	 * for more infomation, please refer to iwk_calibration.h file
5622 	 */
5623 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5624 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5625 
5626 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5627 			rx_sens_p->cck_noise_ref = max_noise_20;
5628 
5629 			if (rx_sens_p->min_energy_det_cck > 2) {
5630 				rx_sens_p->min_energy_det_cck -= 2;
5631 			}
5632 		}
5633 
5634 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5635 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5636 		} else {
5637 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5638 				rx_sens_p->auto_corr_cck_x4 += 3;
5639 			} else {
5640 				rx_sens_p->auto_corr_cck_x4 = 200;
5641 			}
5642 		}
5643 
5644 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5645 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5646 		} else {
5647 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5648 		}
5649 
5650 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5651 
5652 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5653 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5654 
5655 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5656 		    (int32_t)max_noise_20;
5657 
5658 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5659 		    ((rx_sens_p->cck_noise_diff > 2) ||
5660 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5661 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5662 				rx_sens_p->min_energy_det_cck += 2;
5663 			} else {
5664 				rx_sens_p->min_energy_det_cck = 97;
5665 			}
5666 
5667 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5668 				rx_sens_p->auto_corr_cck_x4 -= 3;
5669 			} else {
5670 				rx_sens_p->auto_corr_cck_x4 = 125;
5671 			}
5672 
5673 			if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) {
5674 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5675 			} else {
5676 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5677 			}
5678 
5679 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5680 		} else {
5681 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5682 		}
5683 	} else {
5684 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5685 
5686 		rx_sens_p->cck_noise_ref = max_noise_20;
5687 
5688 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5689 			rx_sens_p->min_energy_det_cck -= 8;
5690 		}
5691 
5692 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5693 	}
5694 
5695 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5696 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5697 	}
5698 
5699 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5700 
5701 	return (IWK_SUCCESS);
5702 }
5703 
5704 /*
5705  * make Rx sensitivity calibration for OFDM mode.
5706  * This is preparing parameters for Sensitivity command
5707  */
5708 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5709 {
5710 	uint32_t temp;
5711 	uint16_t temp1;
5712 	uint32_t ofdm_fa, ofdm_bp;
5713 	uint32_t ofdm_sum_fa_bp;
5714 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5715 
5716 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5717 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5718 
5719 	/* accumulate false alarm */
5720 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5721 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5722 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5723 		ofdm_fa += (0xFFFFFFFF - temp);
5724 	} else {
5725 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5726 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5727 	}
5728 
5729 	/* accumulate bad plcp */
5730 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5731 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5732 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5733 		ofdm_bp += (0xFFFFFFFF - temp);
5734 	} else {
5735 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5736 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5737 	}
5738 
5739 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5740 
5741 	/*
5742 	 * adjust parameter in sensitivity command according to different status
5743 	 */
5744 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5745 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5746 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5747 
5748 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5749 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5750 		    (temp1 <= 210) ? temp1 : 210;
5751 
5752 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5753 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5754 
5755 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5756 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5757 		    (temp1 <= 270) ? temp1 : 270;
5758 
5759 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5760 
5761 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5762 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5763 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5764 
5765 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5766 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5767 		    (temp1 >= 170) ? temp1 : 170;
5768 
5769 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5770 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5771 
5772 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5773 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5774 		    (temp1 >= 220) ? temp1 : 220;
5775 
5776 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5777 
5778 	} else {
5779 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5780 	}
5781 
5782 	return (IWK_SUCCESS);
5783 }
5784 
5785 /*
5786  * additional process to management frames
5787  */
5788 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
5789     struct ieee80211_node *in,
5790     int subtype, int rssi, uint32_t rstamp)
5791 {
5792 	iwk_sc_t *sc = (iwk_sc_t *)ic;
5793 	struct ieee80211_frame *wh;
5794 	uint8_t index1, index2;
5795 	int err;
5796 
5797 	sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp);
5798 
5799 	mutex_enter(&sc->sc_glock);
5800 	switch (subtype) {
5801 	case IEEE80211_FC0_SUBTYPE_BEACON:
5802 		if (sc->sc_ibss.ibss_beacon.syncbeacon && in == ic->ic_bss &&
5803 		    ic->ic_state == IEEE80211_S_RUN) {
5804 			if (ieee80211_beacon_update(ic, in,
5805 			    &sc->sc_ibss.ibss_beacon.iwk_boff,
5806 			    sc->sc_ibss.ibss_beacon.mp, 0)) {
5807 				bcopy(sc->sc_ibss.ibss_beacon.mp->b_rptr,
5808 				    sc->sc_ibss.ibss_beacon.beacon_cmd.
5809 				    bcon_frame,
5810 				    MBLKL(sc->sc_ibss.ibss_beacon.mp));
5811 			}
5812 			err = iwk_cmd(sc, REPLY_TX_BEACON,
5813 			    &sc->sc_ibss.ibss_beacon.beacon_cmd,
5814 			    sc->sc_ibss.ibss_beacon.beacon_cmd_len, 1);
5815 			if (err != IWK_SUCCESS) {
5816 				cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5817 				    "failed to TX beacon.\n");
5818 			}
5819 			sc->sc_ibss.ibss_beacon.syncbeacon = 0;
5820 		}
5821 		if (ic->ic_opmode == IEEE80211_M_IBSS &&
5822 		    ic->ic_state == IEEE80211_S_RUN) {
5823 			wh = (struct ieee80211_frame *)mp->b_rptr;
5824 			mutex_enter(&sc->sc_ibss.node_tb_lock);
5825 			/*
5826 			 * search for node in ibss node table
5827 			 */
5828 			for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
5829 			    index1++) {
5830 				if (sc->sc_ibss.ibss_node_tb[index1].used &&
5831 				    IEEE80211_ADDR_EQ(sc->sc_ibss.
5832 				    ibss_node_tb[index1].node.bssid,
5833 				    wh->i_addr2)) {
5834 					break;
5835 				}
5836 			}
5837 			/*
5838 			 * if don't find in ibss node table
5839 			 */
5840 			if (index1 >= IWK_BROADCAST_ID) {
5841 				err = iwk_clean_add_node_ibss(ic,
5842 				    wh->i_addr2, &index2);
5843 				if (err != IWK_SUCCESS) {
5844 					cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5845 					    "failed to clean all nodes "
5846 					    "and add one node\n");
5847 				}
5848 			}
5849 			mutex_exit(&sc->sc_ibss.node_tb_lock);
5850 		}
5851 		break;
5852 	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
5853 		break;
5854 	}
5855 	mutex_exit(&sc->sc_glock);
5856 }
5857 
5858 /*
5859  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5860  *     a 256-entry history of uCode execution within a circular buffer.
5861  *     Its header format is:
5862  *
5863  *	uint32_t log_size;	log capacity (in number of entries)
5864  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5865  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5866  *      uint32_t write_index;	next circular buffer entry that uCode would fill
5867  *
5868  *     The header is followed by the circular buffer of log entries.  Entries
5869  *     with timestamps have the following format:
5870  *
5871  *	uint32_t event_id;     range 0 - 1500
5872  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5873  *	uint32_t data;         event_id-specific data value
5874  *
5875  *     Entries without timestamps contain only event_id and data.
5876  */
5877 
5878 /*
5879  * iwk_write_event_log - Write event log to dmesg
5880  */
5881 static void iwk_write_event_log(iwk_sc_t *sc)
5882 {
5883 	uint32_t log_event_table_ptr;	/* Start address of event table */
5884 	uint32_t startptr;	/* Start address of log data */
5885 	uint32_t logptr;	/* address of log data entry */
5886 	uint32_t i, n, num_events;
5887 	uint32_t event_id, data1, data2; /* log data */
5888 
5889 	uint32_t log_size;   /* log capacity (in number of entries) */
5890 	uint32_t type;	/* (1)timestamp with each entry,(0) no timestamp */
5891 	uint32_t wraps;	/* # times uCode has wrapped to */
5892 			/* the top of circular buffer */
5893 	uint32_t idx; /* index of entry to be filled in next */
5894 
5895 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5896 	if (!(log_event_table_ptr)) {
5897 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5898 		return;
5899 	}
5900 
5901 	iwk_mac_access_enter(sc);
5902 
5903 	/* Read log header */
5904 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5905 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5906 	type = iwk_mem_read(sc, log_event_table_ptr);
5907 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5908 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5909 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5910 	idx = iwk_mem_read(sc, log_event_table_ptr);
5911 	startptr = log_event_table_ptr +
5912 	    sizeof (uint32_t); /* addr of start of log data */
5913 	if (!log_size & !wraps) {
5914 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5915 		iwk_mac_access_exit(sc);
5916 		return;
5917 	}
5918 
5919 	if (!wraps) {
5920 		num_events = idx;
5921 		logptr = startptr;
5922 	} else {
5923 		num_events = log_size - idx;
5924 		n = type ? 2 : 3;
5925 		logptr = startptr + (idx * n * sizeof (uint32_t));
5926 	}
5927 
5928 	for (i = 0; i < num_events; i++) {
5929 		event_id = iwk_mem_read(sc, logptr);
5930 		logptr += sizeof (uint32_t);
5931 		data1 = iwk_mem_read(sc, logptr);
5932 		logptr += sizeof (uint32_t);
5933 		if (type == 0) { /* no timestamp */
5934 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=%x0x",
5935 			    event_id, data1));
5936 		} else { /* timestamp */
5937 			data2 = iwk_mem_read(sc, logptr);
5938 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5939 			    data1, event_id, data2);
5940 			IWK_DBG((IWK_DEBUG_EEPROM,
5941 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5942 			    data1, event_id, data2));
5943 			logptr += sizeof (uint32_t);
5944 		}
5945 	}
5946 
5947 	/*
5948 	 * Print the wrapped around entries, if any
5949 	 */
5950 	if (wraps) {
5951 		logptr = startptr;
5952 		for (i = 0; i < idx; i++) {
5953 			event_id = iwk_mem_read(sc, logptr);
5954 			logptr += sizeof (uint32_t);
5955 			data1 = iwk_mem_read(sc, logptr);
5956 			logptr += sizeof (uint32_t);
5957 			if (type == 0) { /* no timestamp */
5958 				IWK_DBG((IWK_DEBUG_EEPROM,
5959 				    "Event ID=%d, Data=%x0x", event_id, data1));
5960 			} else { /* timestamp */
5961 				data2 = iwk_mem_read(sc, logptr);
5962 				IWK_DBG((IWK_DEBUG_EEPROM,
5963 				    "Time = %d, Event ID=%d, Data=0x%x\n",
5964 				    data1, event_id, data2));
5965 				logptr += sizeof (uint32_t);
5966 			}
5967 		}
5968 	}
5969 
5970 	iwk_mac_access_exit(sc);
5971 }
5972 
5973 /*
5974  * error_event_table_ptr indicates base of the error log.  This contains
5975  * information about any uCode error that occurs.  For 4965, the format is:
5976  *
5977  * uint32_t valid;        (nonzero) valid, (0) log is empty
5978  * uint32_t error_id;     type of error
5979  * uint32_t pc;           program counter
5980  * uint32_t blink1;       branch link
5981  * uint32_t blink2;       branch link
5982  * uint32_t ilink1;       interrupt link
5983  * uint32_t ilink2;       interrupt link
5984  * uint32_t data1;        error-specific data
5985  * uint32_t data2;        error-specific data
5986  * uint32_t line;         source code line of error
5987  * uint32_t bcon_time;    beacon timer
5988  * uint32_t tsf_low;      network timestamp function timer
5989  * uint32_t tsf_hi;       network timestamp function timer
5990  */
5991 /*
5992  * iwk_write_error_log - Write error log to dmesg
5993  */
5994 static void iwk_write_error_log(iwk_sc_t *sc)
5995 {
5996 	uint32_t err_ptr;	/* Start address of error log */
5997 	uint32_t valid;		/* is error log valid */
5998 
5999 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
6000 	if (!(err_ptr)) {
6001 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
6002 		return;
6003 	}
6004 
6005 	iwk_mac_access_enter(sc);
6006 
6007 	valid = iwk_mem_read(sc, err_ptr);
6008 	if (!(valid)) {
6009 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
6010 		iwk_mac_access_exit(sc);
6011 		return;
6012 	}
6013 	err_ptr += sizeof (uint32_t);
6014 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
6015 	err_ptr += sizeof (uint32_t);
6016 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
6017 	err_ptr += sizeof (uint32_t);
6018 	IWK_DBG((IWK_DEBUG_EEPROM,
6019 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6020 	err_ptr += sizeof (uint32_t);
6021 	IWK_DBG((IWK_DEBUG_EEPROM,
6022 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6023 	err_ptr += sizeof (uint32_t);
6024 	IWK_DBG((IWK_DEBUG_EEPROM,
6025 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6026 	err_ptr += sizeof (uint32_t);
6027 	IWK_DBG((IWK_DEBUG_EEPROM,
6028 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6029 	err_ptr += sizeof (uint32_t);
6030 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
6031 	err_ptr += sizeof (uint32_t);
6032 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
6033 	err_ptr += sizeof (uint32_t);
6034 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
6035 	err_ptr += sizeof (uint32_t);
6036 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
6037 	err_ptr += sizeof (uint32_t);
6038 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
6039 	err_ptr += sizeof (uint32_t);
6040 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
6041 
6042 	iwk_mac_access_exit(sc);
6043 }
6044 
6045 static int
6046 iwk_run_state_config_ibss(ieee80211com_t *ic)
6047 {
6048 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6049 	ieee80211_node_t *in = ic->ic_bss;
6050 	int i, err = IWK_SUCCESS;
6051 
6052 	mutex_enter(&sc->sc_ibss.node_tb_lock);
6053 
6054 	/*
6055 	 * clean all nodes in ibss node table assure be
6056 	 * consistent with hardware
6057 	 */
6058 	for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6059 		sc->sc_ibss.ibss_node_tb[i].used = 0;
6060 		(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6061 		    0,
6062 		    sizeof (iwk_add_sta_t));
6063 	}
6064 
6065 	sc->sc_ibss.node_number = 0;
6066 
6067 	mutex_exit(&sc->sc_ibss.node_tb_lock);
6068 
6069 	/*
6070 	 * configure RX and TX
6071 	 */
6072 	sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
6073 
6074 	sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6075 	sc->sc_config.filter_flags =
6076 	    LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
6077 	    RXON_FILTER_DIS_DECRYPT_MSK |
6078 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
6079 
6080 	sc->sc_config.assoc_id = 0;
6081 
6082 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
6083 	sc->sc_config.chan = ieee80211_chan2ieee(ic,
6084 	    in->in_chan);
6085 
6086 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
6087 		sc->sc_config.cck_basic_rates = 0x03;
6088 		sc->sc_config.ofdm_basic_rates = 0;
6089 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
6090 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
6091 		sc->sc_config.cck_basic_rates = 0;
6092 		sc->sc_config.ofdm_basic_rates = 0x15;
6093 
6094 	} else {
6095 		sc->sc_config.cck_basic_rates = 0x0f;
6096 		sc->sc_config.ofdm_basic_rates = 0xff;
6097 	}
6098 
6099 	sc->sc_config.flags &=
6100 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6101 	    RXON_FLG_SHORT_SLOT_MSK);
6102 
6103 	if (ic->ic_flags & IEEE80211_F_SHSLOT) {
6104 		sc->sc_config.flags |=
6105 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6106 	}
6107 
6108 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
6109 		sc->sc_config.flags |=
6110 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6111 	}
6112 
6113 	sc->sc_config.filter_flags |=
6114 	    LE_32(RXON_FILTER_ASSOC_MSK);
6115 
6116 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6117 	    sizeof (iwk_rxon_cmd_t), 1);
6118 	if (err != IWK_SUCCESS) {
6119 		cmn_err(CE_WARN, "iwk_run_state_config_ibss(): "
6120 		    "failed to update configuration.\n");
6121 		return (err);
6122 	}
6123 
6124 	return (err);
6125 
6126 }
6127 
6128 static int
6129 iwk_run_state_config_sta(ieee80211com_t *ic)
6130 {
6131 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6132 	ieee80211_node_t *in = ic->ic_bss;
6133 	int err = IWK_SUCCESS;
6134 
6135 	/* update adapter's configuration */
6136 	if (sc->sc_assoc_id != in->in_associd) {
6137 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6138 		    "associate ID mismatch: expected %d, "
6139 		    "got %d\n",
6140 		    in->in_associd, sc->sc_assoc_id);
6141 	}
6142 	sc->sc_config.assoc_id = in->in_associd & 0x3fff;
6143 
6144 	/*
6145 	 * short preamble/slot time are
6146 	 * negotiated when associating
6147 	 */
6148 	sc->sc_config.flags &=
6149 	    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6150 	    RXON_FLG_SHORT_SLOT_MSK);
6151 
6152 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
6153 		sc->sc_config.flags |=
6154 		    LE_32(RXON_FLG_SHORT_SLOT_MSK);
6155 
6156 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6157 		sc->sc_config.flags |=
6158 		    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6159 
6160 	sc->sc_config.filter_flags |=
6161 	    LE_32(RXON_FILTER_ASSOC_MSK);
6162 
6163 	if (ic->ic_opmode != IEEE80211_M_STA)
6164 		sc->sc_config.filter_flags |=
6165 		    LE_32(RXON_FILTER_BCON_AWARE_MSK);
6166 
6167 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
6168 	    " filter_flags %x\n",
6169 	    sc->sc_config.chan, sc->sc_config.flags,
6170 	    sc->sc_config.filter_flags));
6171 
6172 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6173 	    sizeof (iwk_rxon_cmd_t), 1);
6174 	if (err != IWK_SUCCESS) {
6175 		cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6176 		    "failed to update configuration\n");
6177 		return (err);
6178 	}
6179 
6180 	return (err);
6181 }
6182 
6183 static int
6184 iwk_start_tx_beacon(ieee80211com_t *ic)
6185 {
6186 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6187 	ieee80211_node_t *in = ic->ic_bss;
6188 	int err = IWK_SUCCESS;
6189 	iwk_tx_beacon_cmd_t  *tx_beacon_p;
6190 	uint16_t  masks = 0;
6191 	mblk_t *mp;
6192 	int rate;
6193 
6194 	/*
6195 	 * allocate and transmit beacon frames
6196 	 */
6197 	tx_beacon_p = &sc->sc_ibss.ibss_beacon.beacon_cmd;
6198 
6199 	(void) memset(tx_beacon_p, 0,
6200 	    sizeof (iwk_tx_beacon_cmd_t));
6201 	rate = 0;
6202 	masks = 0;
6203 
6204 	tx_beacon_p->config.sta_id = IWK_BROADCAST_ID;
6205 	tx_beacon_p->config.stop_time.life_time =
6206 	    LE_32(0xffffffff);
6207 
6208 	if (sc->sc_ibss.ibss_beacon.mp != NULL) {
6209 		freemsg(sc->sc_ibss.ibss_beacon.mp);
6210 		sc->sc_ibss.ibss_beacon.mp = NULL;
6211 	}
6212 
6213 	sc->sc_ibss.ibss_beacon.mp =
6214 	    ieee80211_beacon_alloc(ic, in,
6215 	    &sc->sc_ibss.ibss_beacon.iwk_boff);
6216 	if (sc->sc_ibss.ibss_beacon.mp == NULL) {
6217 		cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6218 		    "failed to get beacon frame.\n");
6219 		return (IWK_FAIL);
6220 	}
6221 
6222 	mp = sc->sc_ibss.ibss_beacon.mp;
6223 
6224 	ASSERT(mp->b_cont == NULL);
6225 
6226 	bcopy(mp->b_rptr, tx_beacon_p->bcon_frame, MBLKL(mp));
6227 
6228 	tx_beacon_p->config.len = (uint16_t)(MBLKL(mp));
6229 	sc->sc_ibss.ibss_beacon.beacon_cmd_len =
6230 	    sizeof (iwk_tx_cmd_t) +
6231 	    4 + tx_beacon_p->config.len;
6232 
6233 	/*
6234 	 * beacons are sent at 1M
6235 	 */
6236 	rate = in->in_rates.ir_rates[0];
6237 	rate &= IEEE80211_RATE_VAL;
6238 
6239 	if (2 == rate || 4 == rate || 11 == rate ||
6240 	    22 == rate) {
6241 		masks |= RATE_MCS_CCK_MSK;
6242 	}
6243 
6244 	masks |= RATE_MCS_ANT_B_MSK;
6245 
6246 	tx_beacon_p->config.rate.r.rate_n_flags =
6247 	    (iwk_rate_to_plcp(rate) | masks);
6248 
6249 
6250 	tx_beacon_p->config.tx_flags =
6251 	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
6252 
6253 	if (ic->ic_bss->in_tstamp.tsf != 0) {
6254 		sc->sc_ibss.ibss_beacon.syncbeacon = 1;
6255 	} else {
6256 		if (ieee80211_beacon_update(ic, in,
6257 		    &sc->sc_ibss.ibss_beacon.iwk_boff,
6258 		    mp, 0)) {
6259 			bcopy(mp->b_rptr,
6260 			    tx_beacon_p->bcon_frame,
6261 			    MBLKL(mp));
6262 		}
6263 
6264 		err = iwk_cmd(sc, REPLY_TX_BEACON,
6265 		    tx_beacon_p,
6266 		    sc->sc_ibss.ibss_beacon.beacon_cmd_len,
6267 		    1);
6268 		if (err != IWK_SUCCESS) {
6269 			cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6270 			    "failed to TX beacon.\n");
6271 			return (err);
6272 		}
6273 
6274 		sc->sc_ibss.ibss_beacon.syncbeacon = 0;
6275 	}
6276 
6277 	return (err);
6278 }
6279 
6280 static int
6281 iwk_clean_add_node_ibss(struct ieee80211com *ic,
6282     uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2)
6283 {
6284 	iwk_sc_t *sc = (iwk_sc_t *)ic;
6285 	uint8_t	index;
6286 	iwk_add_sta_t bc_node;
6287 	iwk_link_quality_cmd_t bc_link_quality;
6288 	iwk_link_quality_cmd_t link_quality;
6289 	uint16_t  bc_masks = 0;
6290 	uint16_t  masks = 0;
6291 	int i, rate;
6292 	struct ieee80211_rateset rs;
6293 	iwk_ibss_node_t *ibss_node_p;
6294 	int err = IWK_SUCCESS;
6295 
6296 	/*
6297 	 * find a location that is not
6298 	 * used in ibss node table
6299 	 */
6300 	for (index = IWK_STA_ID;
6301 	    index < IWK_STATION_COUNT; index++) {
6302 		if (!sc->sc_ibss.ibss_node_tb[index].used) {
6303 			break;
6304 		}
6305 	}
6306 
6307 	/*
6308 	 * if have too many nodes in hardware, clean up
6309 	 */
6310 	if (index < IWK_BROADCAST_ID &&
6311 	    sc->sc_ibss.node_number >= 25) {
6312 		if (iwk_cmd(sc, REPLY_REMOVE_ALL_STA,
6313 		    NULL, 0, 1) != IWK_SUCCESS) {
6314 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6315 			    "failed to remove all nodes in hardware\n");
6316 			return (IWK_FAIL);
6317 		}
6318 
6319 		for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6320 			sc->sc_ibss.ibss_node_tb[i].used = 0;
6321 			(void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6322 			    0, sizeof (iwk_add_sta_t));
6323 		}
6324 
6325 		sc->sc_ibss.node_number = 0;
6326 
6327 		/*
6328 		 * add broadcast node so that we
6329 		 * can send broadcast frame
6330 		 */
6331 		(void) memset(&bc_node, 0, sizeof (bc_node));
6332 		(void) memset(bc_node.bssid, 0xff, 6);
6333 		bc_node.id = IWK_BROADCAST_ID;
6334 
6335 		err = iwk_cmd(sc, REPLY_ADD_STA, &bc_node, sizeof (bc_node), 1);
6336 		if (err != IWK_SUCCESS) {
6337 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6338 		    "failed to add broadcast node\n");
6339 		return (err);
6340 		}
6341 
6342 		/* TX_LINK_QUALITY cmd */
6343 		(void) memset(&bc_link_quality, 0, sizeof (bc_link_quality));
6344 		for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6345 			bc_masks |= RATE_MCS_CCK_MSK;
6346 			bc_masks |= RATE_MCS_ANT_B_MSK;
6347 			bc_masks &= ~RATE_MCS_ANT_A_MSK;
6348 			bc_link_quality.rate_n_flags[i] =
6349 			    iwk_rate_to_plcp(2) | bc_masks;
6350 		}
6351 
6352 		bc_link_quality.general_params.single_stream_ant_msk = 2;
6353 		bc_link_quality.general_params.dual_stream_ant_msk = 3;
6354 		bc_link_quality.agg_params.agg_dis_start_th = 3;
6355 		bc_link_quality.agg_params.agg_time_limit = LE_16(4000);
6356 		bc_link_quality.sta_id = IWK_BROADCAST_ID;
6357 
6358 		err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6359 		    &bc_link_quality, sizeof (bc_link_quality), 1);
6360 		if (err != IWK_SUCCESS) {
6361 			cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6362 			    "failed to config link quality table\n");
6363 			return (err);
6364 		}
6365 	}
6366 
6367 	if (index >= IWK_BROADCAST_ID) {
6368 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6369 		    "the count of node in hardware is too much\n");
6370 		return (IWK_FAIL);
6371 	}
6372 
6373 	/*
6374 	 * add a node into hardware
6375 	 */
6376 	ibss_node_p = &sc->sc_ibss.ibss_node_tb[index];
6377 
6378 	ibss_node_p->used = 1;
6379 
6380 	(void) memset(&ibss_node_p->node, 0,
6381 	    sizeof (iwk_add_sta_t));
6382 
6383 	IEEE80211_ADDR_COPY(ibss_node_p->node.bssid, addr);
6384 	ibss_node_p->node.id = index;
6385 	ibss_node_p->node.control = 0;
6386 	ibss_node_p->node.flags = 0;
6387 
6388 	err = iwk_cmd(sc, REPLY_ADD_STA, &ibss_node_p->node,
6389 	    sizeof (iwk_add_sta_t), 1);
6390 	if (err != IWK_SUCCESS) {
6391 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6392 		    "failed to add IBSS node\n");
6393 		ibss_node_p->used = 0;
6394 		(void) memset(&ibss_node_p->node, 0,
6395 		    sizeof (iwk_add_sta_t));
6396 		return (err);
6397 	}
6398 
6399 	sc->sc_ibss.node_number++;
6400 
6401 	(void) memset(&link_quality, 0, sizeof (link_quality));
6402 
6403 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic,
6404 	    ic->ic_curchan)];
6405 
6406 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6407 		if (i < rs.ir_nrates) {
6408 			rate = rs.
6409 			    ir_rates[rs.ir_nrates - i];
6410 		} else {
6411 			rate = 2;
6412 		}
6413 
6414 		if (2 == rate || 4 == rate ||
6415 		    11 == rate || 22 == rate) {
6416 			masks |= RATE_MCS_CCK_MSK;
6417 		}
6418 
6419 		masks |= RATE_MCS_ANT_B_MSK;
6420 		masks &= ~RATE_MCS_ANT_A_MSK;
6421 
6422 		link_quality.rate_n_flags[i] =
6423 		    iwk_rate_to_plcp(rate) | masks;
6424 	}
6425 
6426 	link_quality.general_params.single_stream_ant_msk = 2;
6427 	link_quality.general_params.dual_stream_ant_msk = 3;
6428 	link_quality.agg_params.agg_dis_start_th = 3;
6429 	link_quality.agg_params.agg_time_limit = LE_16(4000);
6430 	link_quality.sta_id = ibss_node_p->node.id;
6431 
6432 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6433 	    &link_quality, sizeof (link_quality), 1);
6434 	if (err != IWK_SUCCESS) {
6435 		cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6436 		    "failed to set up TX link quality\n");
6437 		ibss_node_p->used = 0;
6438 		(void) memset(ibss_node_p->node.bssid, 0, 6);
6439 		return (err);
6440 	}
6441 
6442 	*index2 = index;
6443 
6444 	return (err);
6445 }
6446