xref: /titanic_50/usr/src/uts/common/io/iwk/iwk2.c (revision 3e5bc1d795e8c41f3680a71e3954e72d079ee46d)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN (Kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
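/*
 * Set iwk_dbg_flags to a bitwise OR of the IWK_DEBUG_* masks above
 * (for example, iwk_dbg_flags = IWK_DEBUG_TX | IWK_DEBUG_RX, e.g. from
 * mdb at runtime) to enable the corresponding IWK_DBG() messages.
 */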
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
88 
89 static void	*iwk_soft_state_p = NULL;
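/*
 * Firmware image linked into the driver: an iwk_firmware_hdr_t followed
 * by the runtime text/data, init text/data and boot sections (see the
 * layout comment in iwk_alloc_fw_dma()).
 */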
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * The hardware supports up to 4 segments per descriptor, but all the
177  * wifi drivers currently put the whole frame in a single descriptor,
178  * so we set the maximum number of segments to 1, the same as for the
179  * rx buffer.  We may leverage the hardware's multi-segment ability in
180  * the future, which is why tx_buffer_dma_attr is not simply shared
181  * with rx_buffer_dma_attr.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
197 
198 /* DMA attributes for the text and data parts of the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 
312 static void	iwk_write_event_log(iwk_sc_t *);
313 static void	iwk_write_error_log(iwk_sc_t *);
314 
315 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
316 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
317 static int	iwk_quiesce(dev_info_t *dip);
318 
319 /*
320  * GLD specific operations
321  */
322 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
323 static int	iwk_m_start(void *arg);
324 static void	iwk_m_stop(void *arg);
325 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
326 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
327 static int	iwk_m_promisc(void *arg, boolean_t on);
328 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
329 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
330 static int	iwk_m_setprop(void *arg, const char *pr_name,
331 	mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
332 static int	iwk_m_getprop(void *arg, const char *pr_name,
333 	mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
334 	void *wldp_buf, uint_t *perm);
335 static void	iwk_destroy_locks(iwk_sc_t *sc);
336 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
337 static void	iwk_thread(iwk_sc_t *sc);
338 
339 /*
340  * Supported rates for 802.11b/g modes (in units of 500Kbps).
341  * 11a and 11n support will be added later.
342  */
343 static const struct ieee80211_rateset iwk_rateset_11b =
344 	{ 4, { 2, 4, 11, 22 } };
345 
346 static const struct ieee80211_rateset iwk_rateset_11g =
347 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
348 
349 /*
350  * For mfthread only
351  */
352 extern pri_t minclsyspri;
353 
354 #define	DRV_NAME_4965	"iwk"
355 
356 /*
357  * Module Loading Data & Entry Points
358  */
359 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
360     iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
361 
362 static struct modldrv iwk_modldrv = {
363 	&mod_driverops,
364 	"Intel(R) 4965AGN driver(N)",
365 	&iwk_devops
366 };
367 
368 static struct modlinkage iwk_modlinkage = {
369 	MODREV_1,
370 	&iwk_modldrv,
371 	NULL
372 };
373 
374 int
375 _init(void)
376 {
377 	int	status;
378 
379 	status = ddi_soft_state_init(&iwk_soft_state_p,
380 	    sizeof (iwk_sc_t), 1);
381 	if (status != DDI_SUCCESS)
382 		return (status);
383 
384 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
385 	status = mod_install(&iwk_modlinkage);
386 	if (status != DDI_SUCCESS) {
387 		mac_fini_ops(&iwk_devops);
388 		ddi_soft_state_fini(&iwk_soft_state_p);
389 	}
390 
391 	return (status);
392 }
393 
394 int
395 _fini(void)
396 {
397 	int status;
398 
399 	status = mod_remove(&iwk_modlinkage);
400 	if (status == DDI_SUCCESS) {
401 		mac_fini_ops(&iwk_devops);
402 		ddi_soft_state_fini(&iwk_soft_state_p);
403 	}
404 
405 	return (status);
406 }
407 
408 int
409 _info(struct modinfo *mip)
410 {
411 	return (mod_info(&iwk_modlinkage, mip));
412 }
413 
414 /*
415  * MAC callback entries
416  */
417 mac_callbacks_t	iwk_m_callbacks = {
418 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
419 	iwk_m_stat,
420 	iwk_m_start,
421 	iwk_m_stop,
422 	iwk_m_promisc,
423 	iwk_m_multicst,
424 	iwk_m_unicst,
425 	iwk_m_tx,
426 	iwk_m_ioctl,
427 	NULL,
428 	NULL,
429 	NULL,
430 	iwk_m_setprop,
431 	iwk_m_getprop
432 };
433 
434 #ifdef DEBUG
435 void
436 iwk_dbg(uint32_t flags, const char *fmt, ...)
437 {
438 	va_list	ap;
439 
440 	if (flags & iwk_dbg_flags) {
441 		va_start(ap, fmt);
442 		vcmn_err(CE_NOTE, fmt, ap);
443 		va_end(ap);
444 	}
445 }
446 #endif
447 
448 /*
449  * device operations
450  */
451 int
452 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
453 {
454 	iwk_sc_t		*sc;
455 	ieee80211com_t	*ic;
456 	int			instance, err, i;
457 	char			strbuf[32];
458 	wifi_data_t		wd = { 0 };
459 	mac_register_t		*macp;
460 
461 	int			intr_type;
462 	int			intr_count;
463 	int			intr_actual;
464 
465 	switch (cmd) {
466 	case DDI_ATTACH:
467 		break;
468 	case DDI_RESUME:
469 		sc = ddi_get_soft_state(iwk_soft_state_p,
470 		    ddi_get_instance(dip));
471 		ASSERT(sc != NULL);
472 		if (sc->sc_flags & IWK_F_RUNNING)
473 			(void) iwk_init(sc);
474 
475 		mutex_enter(&sc->sc_glock);
476 		sc->sc_flags &= ~IWK_F_SUSPEND;
477 		sc->sc_flags |= IWK_F_LAZY_RESUME;
478 		mutex_exit(&sc->sc_glock);
479 
480 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
481 		return (DDI_SUCCESS);
482 	default:
483 		err = DDI_FAILURE;
484 		goto attach_fail1;
485 	}
486 
487 	instance = ddi_get_instance(dip);
488 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
489 	if (err != DDI_SUCCESS) {
490 		cmn_err(CE_WARN,
491 		    "iwk_attach(): failed to allocate soft state\n");
492 		goto attach_fail1;
493 	}
494 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
495 	sc->sc_dip = dip;
496 
497 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
498 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
499 	if (err != DDI_SUCCESS) {
500 		cmn_err(CE_WARN,
501 		    "iwk_attach(): failed to map config space regs\n");
502 		goto attach_fail2;
503 	}
504 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
505 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
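	/*
	 * Clear the PCI retry timeout register (config offset 0x41), as
	 * Intel's reference drivers do, so that PCI retries do not
	 * interfere with power management.
	 */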
506 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
507 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
508 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
509 	if (!sc->sc_clsz)
510 		sc->sc_clsz = 16;
511 	sc->sc_clsz = (sc->sc_clsz << 2);
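	/*
	 * Rx/Tx DMA buffer size: 4KB plus the largest 802.11 frame
	 * (header, MTU, CRC and WEP overhead), rounded up to the cache
	 * line size.
	 */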
512 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
513 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
514 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
515 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
516 	/*
517 	 * Map operating registers
518 	 */
519 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
520 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
521 	if (err != DDI_SUCCESS) {
522 		cmn_err(CE_WARN,
523 		    "iwk_attach(): failed to map device regs\n");
524 		goto attach_fail2a;
525 	}
526 
527 	err = ddi_intr_get_supported_types(dip, &intr_type);
528 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
529 		cmn_err(CE_WARN, "iwk_attach(): "
530 		    "Fixed type interrupt is not supported\n");
531 		goto attach_fail_intr_a;
532 	}
533 
534 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
535 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
536 		cmn_err(CE_WARN, "iwk_attach(): "
537 		    "No fixed interrupts\n");
538 		goto attach_fail_intr_a;
539 	}
540 
541 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
542 
543 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
544 	    intr_count, &intr_actual, 0);
545 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
546 		cmn_err(CE_WARN, "iwk_attach(): "
547 		    "ddi_intr_alloc() failed 0x%x\n", err);
548 		goto attach_fail_intr_b;
549 	}
550 
551 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
552 	if (err != DDI_SUCCESS) {
553 		cmn_err(CE_WARN, "iwk_attach(): "
554 		    "ddi_intr_get_pri() failed 0x%x\n", err);
555 		goto attach_fail_intr_c;
556 	}
557 
558 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
559 	    DDI_INTR_PRI(sc->sc_intr_pri));
560 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
561 	    DDI_INTR_PRI(sc->sc_intr_pri));
562 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
563 	    DDI_INTR_PRI(sc->sc_intr_pri));
564 
565 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
566 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
567 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
568 	/*
569 	 * initialize the mfthread
570 	 */
571 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
572 	sc->sc_mf_thread = NULL;
573 	sc->sc_mf_thread_switch = 0;
574 
575 	/*
576 	 * Allocate shared page.
577 	 */
578 	err = iwk_alloc_shared(sc);
579 	if (err != DDI_SUCCESS) {
580 		cmn_err(CE_WARN, "iwk_attach(): "
581 		    "failed to allocate shared page\n");
582 		goto attach_fail3;
583 	}
584 
585 	/*
586 	 * Allocate keep warm page.
587 	 */
588 	err = iwk_alloc_kw(sc);
589 	if (err != DDI_SUCCESS) {
590 		cmn_err(CE_WARN, "iwk_attach(): "
591 		    "failed to allocate keep warm page\n");
592 		goto attach_fail3a;
593 	}
594 
595 	/*
596 	 * Do some necessary hardware initializations.
597 	 */
598 	err = iwk_preinit(sc);
599 	if (err != DDI_SUCCESS) {
600 		cmn_err(CE_WARN, "iwk_attach(): "
601 		    "failed to init hardware\n");
602 		goto attach_fail4;
603 	}
604 
605 	/* initialize EEPROM */
606 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
607 	if (err != 0) {
608 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
609 		goto attach_fail4;
610 	}
611 
612 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
613 		cmn_err(CE_WARN, "older EEPROM detected\n");
614 		goto attach_fail4;
615 	}
616 
617 	iwk_get_mac_from_eep(sc);
618 
619 	err = iwk_ring_init(sc);
620 	if (err != DDI_SUCCESS) {
621 		cmn_err(CE_WARN, "iwk_attach(): "
622 		    "failed to allocate and initialize ring\n");
623 		goto attach_fail4;
624 	}
625 
626 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
627 
628 	err = iwk_alloc_fw_dma(sc);
629 	if (err != DDI_SUCCESS) {
630 		cmn_err(CE_WARN, "iwk_attach(): "
631 		    "failed to allocate firmware dma\n");
632 		goto attach_fail5;
633 	}
634 
635 	/*
636 	 * Initialize the wifi part, which will be used by
637 	 * the generic 802.11 layer
638 	 */
639 	ic = &sc->sc_ic;
640 	ic->ic_phytype  = IEEE80211_T_OFDM;
641 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
642 	ic->ic_state    = IEEE80211_S_INIT;
643 	ic->ic_maxrssi  = 100; /* experimental number */
644 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
645 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
646 	/*
647 	 * use software WEP and TKIP, and hardware CCMP
648 	 */
649 	ic->ic_caps |= IEEE80211_C_AES_CCM;
650 	/*
651 	 * Support WPA/WPA2
652 	 */
653 	ic->ic_caps |= IEEE80211_C_WPA;
654 
655 	/* set supported .11b and .11g rates */
656 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
657 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
658 
659 	/* set supported .11b and .11g channels (1 through 11) */
660 	for (i = 1; i <= 11; i++) {
661 		ic->ic_sup_channels[i].ich_freq =
662 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
663 		ic->ic_sup_channels[i].ich_flags =
664 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
665 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
666 		    IEEE80211_CHAN_PASSIVE;
667 	}
668 
669 	ic->ic_xmit = iwk_send;
670 	/*
671 	 * init Wifi layer
672 	 */
673 	ieee80211_attach(ic);
674 
675 	/*
676 	 * each instance has its own WPA door
677 	 */
678 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
679 	    ddi_driver_name(dip),
680 	    ddi_get_instance(dip));
681 
682 	/*
683 	 * Override 80211 default routines
684 	 */
685 	sc->sc_newstate = ic->ic_newstate;
686 	ic->ic_newstate = iwk_newstate;
687 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
688 	ic->ic_node_alloc = iwk_node_alloc;
689 	ic->ic_node_free = iwk_node_free;
690 	ic->ic_crypto.cs_key_set = iwk_key_set;
691 	ieee80211_media_init(ic);
692 	/*
693 	 * initialize default tx key
694 	 */
695 	ic->ic_def_txkey = 0;
696 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
697 	    iwk_rx_softintr, (caddr_t)sc);
698 	if (err != DDI_SUCCESS) {
699 		cmn_err(CE_WARN, "iwk_attach(): "
700 		    "add soft interrupt failed\n");
701 		goto attach_fail7;
702 	}
703 
704 	/*
705 	 * Add the interrupt handler
706 	 */
707 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
708 	    (caddr_t)sc, NULL);
709 	if (err != DDI_SUCCESS) {
710 		cmn_err(CE_WARN, "iwk_attach(): "
711 		    "ddi_intr_add_handler() failed\n");
712 		goto attach_fail8;
713 	}
714 
715 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
716 	if (err != DDI_SUCCESS) {
717 		cmn_err(CE_WARN, "iwk_attach(): "
718 		    "ddi_intr_enable() failed\n");
719 		goto attach_fail_intr_d;
720 	}
721 
722 	/*
723 	 * Initialize the WiFi plugin data passed to the MAC layer
724 	 */
725 	wd.wd_secalloc = WIFI_SEC_NONE;
726 	wd.wd_opmode = ic->ic_opmode;
727 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
728 
729 	macp = mac_alloc(MAC_VERSION);
730 	if (macp == NULL) {
731 		cmn_err(CE_WARN,
732 		    "iwk_attach(): failed to do mac_alloc()\n");
733 		goto attach_fail9;
734 	}
735 
736 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
737 	macp->m_driver		= sc;
738 	macp->m_dip		= dip;
739 	macp->m_src_addr	= ic->ic_macaddr;
740 	macp->m_callbacks	= &iwk_m_callbacks;
741 	macp->m_min_sdu		= 0;
742 	macp->m_max_sdu		= IEEE80211_MTU;
743 	macp->m_pdata		= &wd;
744 	macp->m_pdata_size	= sizeof (wd);
745 
746 	/*
747 	 * Register macp with the MAC layer
748 	 */
749 	err = mac_register(macp, &ic->ic_mach);
750 	mac_free(macp);
751 	if (err != DDI_SUCCESS) {
752 		cmn_err(CE_WARN,
753 		    "iwk_attach(): failed to do mac_register()\n");
754 		goto attach_fail9;
755 	}
756 
757 	/*
758 	 * Create minor node of type DDI_NT_NET_WIFI
759 	 */
760 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
761 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
762 	    instance + 1, DDI_NT_NET_WIFI, 0);
763 	if (err != DDI_SUCCESS)
764 		cmn_err(CE_WARN,
765 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
766 
767 	/*
768 	 * Notify the MAC layer that the link is down
769 	 */
770 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
771 
772 	/*
773 	 * create the mf thread to handle the link status,
774 	 * recover from fatal errors, etc.
775 	 */
776 	sc->sc_mf_thread_switch = 1;
777 	if (sc->sc_mf_thread == NULL)
778 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
779 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
780 
781 	sc->sc_flags |= IWK_F_ATTACHED;
782 
783 	return (DDI_SUCCESS);
784 attach_fail9:
785 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
786 attach_fail_intr_d:
787 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
788 
789 attach_fail8:
790 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
791 	sc->sc_soft_hdl = NULL;
792 attach_fail7:
793 	ieee80211_detach(ic);
794 attach_fail6:
795 	iwk_free_fw_dma(sc);
796 attach_fail5:
797 	iwk_ring_free(sc);
798 attach_fail4:
799 	iwk_free_kw(sc);
800 attach_fail3a:
801 	iwk_free_shared(sc);
802 attach_fail3:
803 	iwk_destroy_locks(sc);
804 attach_fail_intr_c:
805 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
806 attach_fail_intr_b:
807 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
808 attach_fail_intr_a:
809 	ddi_regs_map_free(&sc->sc_handle);
810 attach_fail2a:
811 	ddi_regs_map_free(&sc->sc_cfg_handle);
812 attach_fail2:
813 	ddi_soft_state_free(iwk_soft_state_p, instance);
814 attach_fail1:
815 	return (err);
816 }
817 
818 int
819 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
820 {
821 	iwk_sc_t	*sc;
822 	int err;
823 
824 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
825 	ASSERT(sc != NULL);
826 
827 	switch (cmd) {
828 	case DDI_DETACH:
829 		break;
830 	case DDI_SUSPEND:
831 		mutex_enter(&sc->sc_glock);
832 		sc->sc_flags |= IWK_F_SUSPEND;
833 		mutex_exit(&sc->sc_glock);
834 		if (sc->sc_flags & IWK_F_RUNNING) {
835 			iwk_stop(sc);
836 		}
837 
838 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
839 		return (DDI_SUCCESS);
840 	default:
841 		return (DDI_FAILURE);
842 	}
843 
844 	if (!(sc->sc_flags & IWK_F_ATTACHED))
845 		return (DDI_FAILURE);
846 
847 	err = mac_disable(sc->sc_ic.ic_mach);
848 	if (err != DDI_SUCCESS)
849 		return (err);
850 
851 	/*
852 	 * Destroy the mf_thread
853 	 */
854 	mutex_enter(&sc->sc_mt_lock);
855 	sc->sc_mf_thread_switch = 0;
856 	while (sc->sc_mf_thread != NULL) {
857 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
858 			break;
859 	}
860 	mutex_exit(&sc->sc_mt_lock);
861 
862 	iwk_stop(sc);
863 	DELAY(500000);
864 
865 	/*
866 	 * Unregister from the MAC layer subsystem
867 	 */
868 	(void) mac_unregister(sc->sc_ic.ic_mach);
869 
870 	mutex_enter(&sc->sc_glock);
871 	iwk_free_fw_dma(sc);
872 	iwk_ring_free(sc);
873 	iwk_free_kw(sc);
874 	iwk_free_shared(sc);
875 	mutex_exit(&sc->sc_glock);
876 
877 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
878 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
879 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
880 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
881 
882 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
883 	sc->sc_soft_hdl = NULL;
884 
885 	/*
886 	 * detach ieee80211
887 	 */
888 	ieee80211_detach(&sc->sc_ic);
889 
890 	iwk_destroy_locks(sc);
891 
892 	ddi_regs_map_free(&sc->sc_handle);
893 	ddi_regs_map_free(&sc->sc_cfg_handle);
894 	ddi_remove_minor_node(dip, NULL);
895 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
896 
897 	return (DDI_SUCCESS);
898 }
899 
900 /*
901  * quiesce(9E) entry point.
902  *
903  * This function is called when the system is single-threaded at high
904  * PIL with preemption disabled. Therefore, this function must not
905  * block.
906  *
907  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
908  * DDI_FAILURE indicates an error condition and should almost never happen.
909  */
910 int
911 iwk_quiesce(dev_info_t *dip)
912 {
913 	iwk_sc_t	*sc;
914 
915 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
916 	ASSERT(sc != NULL);
917 
918 	/* no message printing and no lock acquisition */
919 #ifdef DEBUG
920 	iwk_dbg_flags = 0;
921 #endif
922 	sc->sc_flags |= IWK_F_QUIESCED;
923 
924 	iwk_stop(sc);
925 
926 	return (DDI_SUCCESS);
927 }
928 
929 static void
930 iwk_destroy_locks(iwk_sc_t *sc)
931 {
932 	cv_destroy(&sc->sc_mt_cv);
933 	mutex_destroy(&sc->sc_mt_lock);
934 	cv_destroy(&sc->sc_tx_cv);
935 	cv_destroy(&sc->sc_cmd_cv);
936 	cv_destroy(&sc->sc_fw_cv);
937 	mutex_destroy(&sc->sc_tx_lock);
938 	mutex_destroy(&sc->sc_glock);
939 }
940 
941 /*
942  * Allocate an area of memory and a DMA handle for accessing it
943  */
944 static int
945 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
946     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
947     uint_t dma_flags, iwk_dma_t *dma_p)
948 {
949 	caddr_t vaddr;
950 	int err;
951 
952 	/*
953 	 * Allocate handle
954 	 */
955 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
956 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
957 	if (err != DDI_SUCCESS) {
958 		dma_p->dma_hdl = NULL;
959 		return (DDI_FAILURE);
960 	}
961 
962 	/*
963 	 * Allocate memory
964 	 */
965 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
966 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
967 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
968 	if (err != DDI_SUCCESS) {
969 		ddi_dma_free_handle(&dma_p->dma_hdl);
970 		dma_p->dma_hdl = NULL;
971 		dma_p->acc_hdl = NULL;
972 		return (DDI_FAILURE);
973 	}
974 
975 	/*
976 	 * Bind the two together
977 	 */
978 	dma_p->mem_va = vaddr;
979 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
980 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
981 	    &dma_p->cookie, &dma_p->ncookies);
982 	if (err != DDI_DMA_MAPPED) {
983 		ddi_dma_mem_free(&dma_p->acc_hdl);
984 		ddi_dma_free_handle(&dma_p->dma_hdl);
985 		dma_p->acc_hdl = NULL;
986 		dma_p->dma_hdl = NULL;
987 		return (DDI_FAILURE);
988 	}
989 
990 	dma_p->nslots = ~0U;
991 	dma_p->size = ~0U;
992 	dma_p->token = ~0U;
993 	dma_p->offset = 0;
994 	return (DDI_SUCCESS);
995 }
996 
997 /*
998  * Free one allocated area of DMAable memory
999  */
1000 static void
1001 iwk_free_dma_mem(iwk_dma_t *dma_p)
1002 {
1003 	if (dma_p->dma_hdl != NULL) {
1004 		if (dma_p->ncookies) {
1005 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1006 			dma_p->ncookies = 0;
1007 		}
1008 		ddi_dma_free_handle(&dma_p->dma_hdl);
1009 		dma_p->dma_hdl = NULL;
1010 	}
1011 
1012 	if (dma_p->acc_hdl != NULL) {
1013 		ddi_dma_mem_free(&dma_p->acc_hdl);
1014 		dma_p->acc_hdl = NULL;
1015 	}
1016 }
1017 
1018 /*
1019  * Allocate DMA memory for each firmware section and copy it in.
1020  */
1021 static int
1022 iwk_alloc_fw_dma(iwk_sc_t *sc)
1023 {
1024 	int err = DDI_SUCCESS;
1025 	iwk_dma_t *dma_p;
1026 	char *t;
1027 
1028 	/*
1029 	 * firmware image layout:
1030 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1031 	 */
1032 	t = (char *)(sc->sc_hdr + 1);
1033 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1034 	    &fw_dma_attr, &iwk_dma_accattr,
1035 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1036 	    &sc->sc_dma_fw_text);
1037 	dma_p = &sc->sc_dma_fw_text;
1038 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1039 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1040 	    dma_p->cookie.dmac_size));
1041 	if (err != DDI_SUCCESS) {
1042 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1043 		    " text dma memory");
1044 		goto fail;
1045 	}
1046 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1047 
1048 	t += LE_32(sc->sc_hdr->textsz);
1049 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1050 	    &fw_dma_attr, &iwk_dma_accattr,
1051 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1052 	    &sc->sc_dma_fw_data);
1053 	dma_p = &sc->sc_dma_fw_data;
1054 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1055 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1056 	    dma_p->cookie.dmac_size));
1057 	if (err != DDI_SUCCESS) {
1058 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1059 		    " data dma memory");
1060 		goto fail;
1061 	}
1062 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1063 
1064 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1065 	    &fw_dma_attr, &iwk_dma_accattr,
1066 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1067 	    &sc->sc_dma_fw_data_bak);
1068 	dma_p = &sc->sc_dma_fw_data_bak;
1069 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1070 	    "size:%lx]\n",
1071 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1072 	    dma_p->cookie.dmac_size));
1073 	if (err != DDI_SUCCESS) {
1074 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1075 		    " data backup dma memory");
1076 		goto fail;
1077 	}
1078 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1079 
1080 	t += LE_32(sc->sc_hdr->datasz);
1081 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1082 	    &fw_dma_attr, &iwk_dma_accattr,
1083 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1084 	    &sc->sc_dma_fw_init_text);
1085 	dma_p = &sc->sc_dma_fw_init_text;
1086 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1087 	    "size:%lx]\n",
1088 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1089 	    dma_p->cookie.dmac_size));
1090 	if (err != DDI_SUCCESS) {
1091 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1092 		    " init text dma memory");
1093 		goto fail;
1094 	}
1095 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1096 
1097 	t += LE_32(sc->sc_hdr->init_textsz);
1098 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1099 	    &fw_dma_attr, &iwk_dma_accattr,
1100 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1101 	    &sc->sc_dma_fw_init_data);
1102 	dma_p = &sc->sc_dma_fw_init_data;
1103 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1104 	    "size:%lx]\n",
1105 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1106 	    dma_p->cookie.dmac_size));
1107 	if (err != DDI_SUCCESS) {
1108 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1109 		    " init data dma memory");
1110 		goto fail;
1111 	}
1112 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1113 
1114 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1115 fail:
1116 	return (err);
1117 }
1118 
1119 static void
1120 iwk_free_fw_dma(iwk_sc_t *sc)
1121 {
1122 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1123 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1124 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1125 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1126 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1127 }
1128 
1129 /*
1130  * Allocate a shared page between host and NIC.
1131  */
1132 static int
1133 iwk_alloc_shared(iwk_sc_t *sc)
1134 {
1135 	iwk_dma_t *dma_p;
1136 	int err = DDI_SUCCESS;
1137 
1138 	/* must be aligned on a 4K-page boundary */
1139 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1140 	    &sh_dma_attr, &iwk_dma_accattr,
1141 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1142 	    &sc->sc_dma_sh);
1143 	if (err != DDI_SUCCESS)
1144 		goto fail;
1145 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1146 
1147 	dma_p = &sc->sc_dma_sh;
1148 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1149 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1150 	    dma_p->cookie.dmac_size));
1151 
1152 	return (err);
1153 fail:
1154 	iwk_free_shared(sc);
1155 	return (err);
1156 }
1157 
1158 static void
1159 iwk_free_shared(iwk_sc_t *sc)
1160 {
1161 	iwk_free_dma_mem(&sc->sc_dma_sh);
1162 }
1163 
1164 /*
1165  * Allocate a keep warm page.
1166  */
1167 static int
1168 iwk_alloc_kw(iwk_sc_t *sc)
1169 {
1170 	iwk_dma_t *dma_p;
1171 	int err = DDI_SUCCESS;
1172 
1173 	/* must be aligned on a 4K-page boundary */
1174 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1175 	    &kw_dma_attr, &iwk_dma_accattr,
1176 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1177 	    &sc->sc_dma_kw);
1178 	if (err != DDI_SUCCESS)
1179 		goto fail;
1180 
1181 	dma_p = &sc->sc_dma_kw;
1182 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1183 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1184 	    dma_p->cookie.dmac_size));
1185 
1186 	return (err);
1187 fail:
1188 	iwk_free_kw(sc);
1189 	return (err);
1190 }
1191 
1192 static void
1193 iwk_free_kw(iwk_sc_t *sc)
1194 {
1195 	iwk_free_dma_mem(&sc->sc_dma_kw);
1196 }
1197 
1198 static int
1199 iwk_alloc_rx_ring(iwk_sc_t *sc)
1200 {
1201 	iwk_rx_ring_t *ring;
1202 	iwk_rx_data_t *data;
1203 	iwk_dma_t *dma_p;
1204 	int i, err = DDI_SUCCESS;
1205 
1206 	ring = &sc->sc_rxq;
1207 	ring->cur = 0;
1208 
1209 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1210 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1211 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1212 	    &ring->dma_desc);
1213 	if (err != DDI_SUCCESS) {
1214 		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1215 		goto fail;
1216 	}
1217 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1218 	dma_p = &ring->dma_desc;
1219 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1220 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1221 	    dma_p->cookie.dmac_size));
1222 
1223 	/*
1224 	 * Allocate Rx buffers.
1225 	 */
1226 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1227 		data = &ring->data[i];
1228 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1229 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1230 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1231 		    &data->dma_data);
1232 		if (err != DDI_SUCCESS) {
1233 			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1234 			    "failed\n", i);
1235 			goto fail;
1236 		}
1237 		/*
1238 		 * the physical address bits [8-36] are used,
1239 		 * instead of bits [0-31] as in the 3945.
1240 		 */
1241 		ring->desc[i] = LE_32((uint32_t)
1242 		    (data->dma_data.cookie.dmac_address >> 8));
1243 	}
1244 	dma_p = &ring->data[0].dma_data;
1245 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1246 	    "size:%lx]\n",
1247 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1248 	    dma_p->cookie.dmac_size));
1249 
1250 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1251 
1252 	return (err);
1253 
1254 fail:
1255 	iwk_free_rx_ring(sc);
1256 	return (err);
1257 }
1258 
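/*
 * Stop the Rx DMA channel and wait (up to 2s) for it to go idle before
 * the Rx ring is reused.
 */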
1259 static void
1260 iwk_reset_rx_ring(iwk_sc_t *sc)
1261 {
1262 	int n;
1263 
1264 	iwk_mac_access_enter(sc);
1265 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1266 	for (n = 0; n < 2000; n++) {
1267 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1268 			break;
1269 		DELAY(1000);
1270 	}
1271 
1272 	if (n == 2000)
1273 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1274 
1275 	iwk_mac_access_exit(sc);
1276 
1277 	sc->sc_rxq.cur = 0;
1278 }
1279 
1280 static void
1281 iwk_free_rx_ring(iwk_sc_t *sc)
1282 {
1283 	int i;
1284 
1285 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1286 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1287 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1288 			    DDI_DMA_SYNC_FORCPU);
1289 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1290 	}
1291 
1292 	if (sc->sc_rxq.dma_desc.dma_hdl)
1293 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1294 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1295 }
1296 
1297 static int
1298 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1299     int slots, int qid)
1300 {
1301 	iwk_tx_data_t *data;
1302 	iwk_tx_desc_t *desc_h;
1303 	uint32_t paddr_desc_h;
1304 	iwk_cmd_t *cmd_h;
1305 	uint32_t paddr_cmd_h;
1306 	iwk_dma_t *dma_p;
1307 	int i, err = DDI_SUCCESS;
1308 
1309 	ring->qid = qid;
1310 	ring->count = TFD_QUEUE_SIZE_MAX;
1311 	ring->window = slots;
1312 	ring->queued = 0;
1313 	ring->cur = 0;
1314 
1315 	err = iwk_alloc_dma_mem(sc,
1316 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1317 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1318 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1319 	    &ring->dma_desc);
1320 	if (err != DDI_SUCCESS) {
1321 		cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1322 		    "failed\n", qid);
1323 		goto fail;
1324 	}
1325 	dma_p = &ring->dma_desc;
1326 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1327 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1328 	    dma_p->cookie.dmac_size));
1329 
1330 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1331 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1332 
1333 	err = iwk_alloc_dma_mem(sc,
1334 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1335 	    &cmd_dma_attr, &iwk_dma_accattr,
1336 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1337 	    &ring->dma_cmd);
1338 	if (err != DDI_SUCCESS) {
1339 		cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1340 		    "failed\n", qid);
1341 		goto fail;
1342 	}
1343 	dma_p = &ring->dma_cmd;
1344 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1345 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1346 	    dma_p->cookie.dmac_size));
1347 
1348 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1349 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1350 
1351 	/*
1352 	 * Allocate Tx buffers.
1353 	 */
1354 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1355 	    KM_NOSLEEP);
1356 	if (ring->data == NULL) {
1357 		cmn_err(CE_WARN, "could not allocate tx data slots\n");
1358 		goto fail;
1359 	}
1360 
1361 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1362 		data = &ring->data[i];
1363 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1364 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1365 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1366 		    &data->dma_data);
1367 		if (err != DDI_SUCCESS) {
1368 			cmn_err(CE_WARN, "dma alloc tx ring "
1369 			    "buf[%d] failed\n", i);
1370 			goto fail;
1371 		}
1372 
1373 		data->desc = desc_h + i;
1374 		data->paddr_desc = paddr_desc_h +
1375 		    _PTRDIFF(data->desc, desc_h);
1376 		data->cmd = cmd_h +  i; /* (i % slots); */
1377 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1378 		data->paddr_cmd = paddr_cmd_h +
1379 		    _PTRDIFF(data->cmd, cmd_h);
1380 	}
1381 	dma_p = &ring->data[0].dma_data;
1382 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1383 	    "size:%lx]\n",
1384 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1385 	    dma_p->cookie.dmac_size));
1386 
1387 	return (err);
1388 
1389 fail:
1390 	if (ring->data)
1391 		kmem_free(ring->data,
1392 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1393 	iwk_free_tx_ring(sc, ring);
1394 	return (err);
1395 }
1396 
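/*
 * Stop the Tx DMA channel for this ring, wait briefly for it to go
 * idle, then sync the data buffers and reset the ring indexes.
 */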
1397 static void
1398 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1399 {
1400 	iwk_tx_data_t *data;
1401 	int i, n;
1402 
1403 	iwk_mac_access_enter(sc);
1404 
1405 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1406 	for (n = 0; n < 200; n++) {
1407 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1408 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1409 			break;
1410 		DELAY(10);
1411 	}
1412 	if (n == 200) {
1413 		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1414 		    ring->qid));
1415 	}
1416 	iwk_mac_access_exit(sc);
1417 
1418 	for (i = 0; i < ring->count; i++) {
1419 		data = &ring->data[i];
1420 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1421 	}
1422 
1423 	ring->queued = 0;
1424 	ring->cur = 0;
1425 }
1426 
1427 /*ARGSUSED*/
1428 static void
1429 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1430 {
1431 	int i;
1432 
1433 	if (ring->dma_desc.dma_hdl != NULL)
1434 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1435 	iwk_free_dma_mem(&ring->dma_desc);
1436 
1437 	if (ring->dma_cmd.dma_hdl != NULL)
1438 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1439 	iwk_free_dma_mem(&ring->dma_cmd);
1440 
1441 	if (ring->data != NULL) {
1442 		for (i = 0; i < ring->count; i++) {
1443 			if (ring->data[i].dma_data.dma_hdl)
1444 				IWK_DMA_SYNC(ring->data[i].dma_data,
1445 				    DDI_DMA_SYNC_FORDEV);
1446 			iwk_free_dma_mem(&ring->data[i].dma_data);
1447 		}
1448 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1449 	}
1450 }
1451 
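/*
 * Allocate one Tx ring per queue (the command queue gets TFD_CMD_SLOTS,
 * the others TFD_TX_CMD_SLOTS) plus the single Rx ring.
 */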
1452 static int
1453 iwk_ring_init(iwk_sc_t *sc)
1454 {
1455 	int i, err = DDI_SUCCESS;
1456 
1457 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1458 		if (i == IWK_CMD_QUEUE_NUM)
1459 			continue;
1460 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1461 		    i);
1462 		if (err != DDI_SUCCESS)
1463 			goto fail;
1464 	}
1465 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1466 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1467 	if (err != DDI_SUCCESS)
1468 		goto fail;
1469 	err = iwk_alloc_rx_ring(sc);
1470 	if (err != DDI_SUCCESS)
1471 		goto fail;
1472 	return (err);
1473 
1474 fail:
1475 	return (err);
1476 }
1477 
1478 static void
1479 iwk_ring_free(iwk_sc_t *sc)
1480 {
1481 	int i = IWK_NUM_QUEUES;
1482 
1483 	iwk_free_rx_ring(sc);
1484 	while (--i >= 0) {
1485 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1486 	}
1487 }
1488 
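/*
 * Node allocation/free: each ieee80211_node_t is embedded in an
 * iwk_amrr_t so the AMRR rate-control state travels with the node.
 */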
1489 /* ARGSUSED */
1490 static ieee80211_node_t *
1491 iwk_node_alloc(ieee80211com_t *ic)
1492 {
1493 	iwk_amrr_t *amrr;
1494 
1495 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1496 	if (amrr != NULL)
1497 		iwk_amrr_init(amrr);
1498 	return (&amrr->in);
1499 }
1500 
1501 static void
1502 iwk_node_free(ieee80211_node_t *in)
1503 {
1504 	ieee80211com_t *ic = in->in_ic;
1505 
1506 	ic->ic_node_cleanup(in);
1507 	if (in->in_wpa_ie != NULL)
1508 		ieee80211_free(in->in_wpa_ie);
1509 	kmem_free(in, sizeof (iwk_amrr_t));
1510 }
1511 
1512 /*ARGSUSED*/
1513 static int
1514 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1515 {
1516 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1517 	ieee80211_node_t *in = ic->ic_bss;
1518 	enum ieee80211_state ostate = ic->ic_state;
1519 	int i, err = IWK_SUCCESS;
1520 
1521 	mutex_enter(&sc->sc_glock);
1522 	switch (nstate) {
1523 	case IEEE80211_S_SCAN:
1524 		switch (ostate) {
1525 		case IEEE80211_S_INIT:
1526 		{
1527 			iwk_add_sta_t node;
1528 
1529 			sc->sc_flags |= IWK_F_SCANNING;
1530 			iwk_set_led(sc, 2, 10, 2);
1531 
1532 			/*
1533 			 * clear association to receive beacons from
1534 			 * all BSS'es
1535 			 */
1536 			sc->sc_config.assoc_id = 0;
1537 			sc->sc_config.filter_flags &=
1538 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1539 
1540 			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1541 			    "flags %x filter_flags %x\n", sc->sc_config.chan,
1542 			    sc->sc_config.flags, sc->sc_config.filter_flags));
1543 
1544 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1545 			    sizeof (iwk_rxon_cmd_t), 1);
1546 			if (err != IWK_SUCCESS) {
1547 				cmn_err(CE_WARN,
1548 				    "could not clear association\n");
1549 				sc->sc_flags &= ~IWK_F_SCANNING;
1550 				mutex_exit(&sc->sc_glock);
1551 				return (err);
1552 			}
1553 
1554 			/* add broadcast node to send probe request */
1555 			(void) memset(&node, 0, sizeof (node));
1556 			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1557 			node.id = IWK_BROADCAST_ID;
1558 			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1559 			    sizeof (node), 1);
1560 			if (err != IWK_SUCCESS) {
1561 				cmn_err(CE_WARN, "could not add "
1562 				    "broadcast node\n");
1563 				sc->sc_flags &= ~IWK_F_SCANNING;
1564 				mutex_exit(&sc->sc_glock);
1565 				return (err);
1566 			}
1567 			break;
1568 		}
1569 		case IEEE80211_S_SCAN:
1570 			mutex_exit(&sc->sc_glock);
1571 			/* step to next channel before actual FW scan */
1572 			err = sc->sc_newstate(ic, nstate, arg);
1573 			mutex_enter(&sc->sc_glock);
1574 			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1575 				cmn_err(CE_WARN,
1576 				    "could not initiate scan\n");
1577 				sc->sc_flags &= ~IWK_F_SCANNING;
1578 				ieee80211_cancel_scan(ic);
1579 			}
1580 			mutex_exit(&sc->sc_glock);
1581 			return (err);
1582 		default:
1583 			break;
1584 
1585 		}
1586 		sc->sc_clk = 0;
1587 		break;
1588 
1589 	case IEEE80211_S_AUTH:
1590 		if (ostate == IEEE80211_S_SCAN) {
1591 			sc->sc_flags &= ~IWK_F_SCANNING;
1592 		}
1593 
1594 		/* reset state to handle reassociations correctly */
1595 		sc->sc_config.assoc_id = 0;
1596 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1597 
1598 		/*
1599 		 * before sending authentication and association request frames,
1600 		 * we need to do some hardware setup, such as tuning to the
1601 		 * same channel as the target AP...
1602 		 */
1603 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1604 			cmn_err(CE_WARN, "could not setup firmware for "
1605 			    "authentication\n");
1606 			mutex_exit(&sc->sc_glock);
1607 			return (err);
1608 		}
1609 		break;
1610 
1611 	case IEEE80211_S_RUN:
1612 		if (ostate == IEEE80211_S_SCAN) {
1613 			sc->sc_flags &= ~IWK_F_SCANNING;
1614 		}
1615 
1616 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1617 			/* let LED blink when monitoring */
1618 			iwk_set_led(sc, 2, 10, 10);
1619 			break;
1620 		}
1621 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1622 
1623 		/* non-IBSS mode */
1624 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1625 			/* update adapter's configuration */
1626 			if (sc->sc_assoc_id != in->in_associd) {
1627 				cmn_err(CE_WARN,
1628 				    "association ID mismatch: expected %d, "
1629 				    "got %d\n",
1630 				    in->in_associd, sc->sc_assoc_id);
1631 			}
1632 			sc->sc_config.assoc_id = in->in_associd & 0x3fff;
1633 			/*
1634 			 * short preamble/slot time are
1635 			 * negotiated when associating
1636 			 */
1637 			sc->sc_config.flags &=
1638 			    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
1639 			    RXON_FLG_SHORT_SLOT_MSK);
1640 
1641 			if (ic->ic_flags & IEEE80211_F_SHSLOT)
1642 				sc->sc_config.flags |=
1643 				    LE_32(RXON_FLG_SHORT_SLOT_MSK);
1644 
1645 			if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
1646 				sc->sc_config.flags |=
1647 				    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
1648 
1649 			sc->sc_config.filter_flags |=
1650 			    LE_32(RXON_FILTER_ASSOC_MSK);
1651 
1652 			if (ic->ic_opmode != IEEE80211_M_STA)
1653 				sc->sc_config.filter_flags |=
1654 				    LE_32(RXON_FILTER_BCON_AWARE_MSK);
1655 
1656 			IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
1657 			    " filter_flags %x\n",
1658 			    sc->sc_config.chan, sc->sc_config.flags,
1659 			    sc->sc_config.filter_flags));
1660 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1661 			    sizeof (iwk_rxon_cmd_t), 1);
1662 			if (err != IWK_SUCCESS) {
1663 				cmn_err(CE_WARN, "could not update "
1664 				    "configuration\n");
1665 				mutex_exit(&sc->sc_glock);
1666 				return (err);
1667 			}
1668 		}
1669 
1670 		/* obtain current temperature of chipset */
1671 		sc->sc_tempera = iwk_curr_tempera(sc);
1672 
1673 		/*
1674 		 * perform Tx power calibration to determine
1675 		 * the DSP and radio gains
1676 		 */
1677 		err = iwk_tx_power_calibration(sc);
1678 		if (err) {
1679 			cmn_err(CE_WARN, "iwk_newstate(): "
1680 			    "failed to set tx power table\n");
			mutex_exit(&sc->sc_glock);
1681 			return (err);
1682 		}
1683 
1684 		/* start automatic rate control */
1685 		mutex_enter(&sc->sc_mt_lock);
1686 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1687 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1688 			/* set rate to some reasonable initial value */
1689 			i = in->in_rates.ir_nrates - 1;
1690 			while (i > 0 && IEEE80211_RATE(i) > 72)
1691 				i--;
1692 			in->in_txrate = i;
1693 		} else {
1694 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1695 		}
1696 		mutex_exit(&sc->sc_mt_lock);
1697 
1698 		/* set LED on after associated */
1699 		iwk_set_led(sc, 2, 0, 1);
1700 		break;
1701 
1702 	case IEEE80211_S_INIT:
1703 		if (ostate == IEEE80211_S_SCAN) {
1704 			sc->sc_flags &= ~IWK_F_SCANNING;
1705 		}
1706 
1707 		/* set LED off after init */
1708 		iwk_set_led(sc, 2, 1, 0);
1709 		break;
1710 	case IEEE80211_S_ASSOC:
1711 		if (ostate == IEEE80211_S_SCAN) {
1712 			sc->sc_flags &= ~IWK_F_SCANNING;
1713 		}
1714 
1715 		break;
1716 	}
1717 
1718 	mutex_exit(&sc->sc_glock);
1719 
1720 	err = sc->sc_newstate(ic, nstate, arg);
1721 
1722 	if (nstate == IEEE80211_S_RUN) {
1723 
1724 		mutex_enter(&sc->sc_glock);
1725 
1726 		/*
1727 		 * initialize the receiver
1728 		 * sensitivity calibration
1729 		 */
1730 		err = iwk_rx_sens_init(sc);
1731 		if (err) {
1732 			cmn_err(CE_WARN, "iwk_newstate(): "
1733 			    "failed to init RX sensitivity\n");
1734 			mutex_exit(&sc->sc_glock);
1735 			return (err);
1736 		}
1737 
1738 		/* initialize the receiver gain balance calibration */
1739 		err = iwk_rxgain_diff_init(sc);
1740 		if (err) {
1741 			cmn_err(CE_WARN, "iwk_newstate(): "
1742 			    "failed to init phy calibration\n");
1743 			mutex_exit(&sc->sc_glock);
1744 			return (err);
1745 		}
1746 
1747 		mutex_exit(&sc->sc_glock);
1748 
1749 	}
1750 
1751 	return (err);
1752 }
1753 
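/*
 * Install a unicast or group key.  WEP and TKIP are left to the net80211
 * software crypto; only AES-CCMP keys are programmed into the firmware
 * via a REPLY_ADD_STA command.
 */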
1754 /*ARGSUSED*/
1755 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1756     const uint8_t mac[IEEE80211_ADDR_LEN])
1757 {
1758 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1759 	iwk_add_sta_t node;
1760 	int err;
1761 
1762 	switch (k->wk_cipher->ic_cipher) {
1763 	case IEEE80211_CIPHER_WEP:
1764 	case IEEE80211_CIPHER_TKIP:
1765 		return (1); /* software does it. */
1766 	case IEEE80211_CIPHER_AES_CCM:
1767 		break;
1768 	default:
1769 		return (0);
1770 	}
1771 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1772 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1773 
1774 	mutex_enter(&sc->sc_glock);
1775 
1776 	/* update ap/multicast node */
1777 	(void) memset(&node, 0, sizeof (node));
1778 	if (IEEE80211_IS_MULTICAST(mac)) {
1779 		(void) memset(node.bssid, 0xff, 6);
1780 		node.id = IWK_BROADCAST_ID;
1781 	} else {
1782 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1783 		node.id = IWK_AP_ID;
1784 	}
1785 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1786 		node.key_flags = 0;
1787 		node.keyp = k->wk_keyix;
1788 	} else {
1789 		node.key_flags = (1 << 14);
1790 		node.keyp = k->wk_keyix + 4;
1791 	}
1792 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1793 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1794 	node.sta_mask = STA_MODIFY_KEY_MASK;
1795 	node.control = 1;
1796 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1797 	if (err != IWK_SUCCESS) {
1798 		cmn_err(CE_WARN, "iwk_key_set():"
1799 		    "failed to update ap node\n");
1800 		mutex_exit(&sc->sc_glock);
1801 		return (0);
1802 	}
1803 	mutex_exit(&sc->sc_glock);
1804 	return (1);
1805 }
1806 
1807 /*
1808  * begin exclusive access to the MAC.
1809  */
1810 static void
1811 iwk_mac_access_enter(iwk_sc_t *sc)
1812 {
1813 	uint32_t tmp;
1814 	int n;
1815 
1816 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1817 	IWK_WRITE(sc, CSR_GP_CNTRL,
1818 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1819 
1820 	/* wait until we succeed */
1821 	for (n = 0; n < 1000; n++) {
1822 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1823 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1824 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1825 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1826 			break;
1827 		DELAY(10);
1828 	}
1829 	if (n == 1000)
1830 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1831 }
1832 
1833 /*
1834  * end exclusive access to the MAC.
1835  */
1836 static void
1837 iwk_mac_access_exit(iwk_sc_t *sc)
1838 {
1839 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1840 	IWK_WRITE(sc, CSR_GP_CNTRL,
1841 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1842 }
1843 
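/*
 * Indirect access to the NIC's internal SRAM through the HBUS target
 * memory registers.
 */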
1844 static uint32_t
1845 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1846 {
1847 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1848 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1849 }
1850 
1851 static void
1852 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1853 {
1854 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1855 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1856 }
1857 
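/*
 * Indirect access to the NIC's peripheral ("PRPH") registers through
 * the HBUS target registers; (3 << 24) appears to select a full 4-byte
 * access.
 */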
1858 static uint32_t
1859 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1860 {
1861 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1862 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1863 }
1864 
1865 static void
1866 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1867 {
1868 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1869 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1870 }
1871 
1872 static void
1873 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1874     uint32_t *data, int wlen)
1875 {
1876 	for (; wlen > 0; wlen--, data++, addr += 4)
1877 		iwk_reg_write(sc, addr, *data);
1878 }
1879 
1880 
1881 /*
1882  * ucode load/initialization steps:
1883  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1884  * BSM contains a small memory that *always* stays powered up, so it can
1885  * retain the bootstrap program even when the card is in a power-saving
1886  * power-down state.  The BSM loads the small program into ARC processor's
1887  * instruction memory when triggered by power-up.
1888  * 2)  load Initialize image via bootstrap program.
1889  * The Initialize image sets up regulatory and calibration data for the
1890  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1891  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1892  * correction.
1893  */
1894 static int
1895 iwk_load_firmware(iwk_sc_t *sc)
1896 {
1897 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1898 	uint32_t size = sc->sc_hdr->bootsz;
1899 	int n, err = IWK_SUCCESS;
1900 
1901 	/*
1902 	 * Program the BSM with the physical address bits [4:35] of the
1903 	 * Initialize uCode image. When the "initialize alive" notification
1904 	 * arrives, the runtime uCode's physical address is programmed instead.
1905 	 */
1906 	iwk_mac_access_enter(sc);
1907 
1908 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1909 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1910 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1911 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1912 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1913 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1914 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1915 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1916 
1917 	/* load bootstrap code into BSM memory */
1918 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1919 	    size / sizeof (uint32_t));
1920 
1921 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1922 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1923 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1924 
1925 	/*
1926 	 * prepare to load initialize uCode
1927 	 */
1928 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
1929 
1930 	/* wait while the adapter is busy loading the firmware */
1931 	for (n = 0; n < 1000; n++) {
1932 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
1933 		    BSM_WR_CTRL_REG_BIT_START))
1934 			break;
1935 		DELAY(10);
1936 	}
1937 	if (n == 1000) {
1938 		cmn_err(CE_WARN, "timeout transferring firmware\n");
1939 		err = ETIMEDOUT;
1940 		return (err);
1941 	}
1942 
1943 	/* for future power-save mode use */
1944 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
1945 
1946 	iwk_mac_access_exit(sc);
1947 
1948 	return (err);
1949 }
1950 
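/*
 * Handle a REPLY_4965_RX notification: parse the PHY result that
 * precedes the frame, derive an RSSI percentage from the per-antenna
 * RSSI and AGC values, drop frames with a bad length or CRC/overrun
 * status, then copy the 802.11 frame into an mblk and pass it to
 * net80211 via ieee80211_input().
 */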
1951 /*ARGSUSED*/
1952 static void
1953 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
1954 {
1955 	ieee80211com_t *ic = &sc->sc_ic;
1956 	iwk_rx_ring_t *ring = &sc->sc_rxq;
1957 	iwk_rx_phy_res_t *stat;
1958 	ieee80211_node_t *in;
1959 	uint32_t *tail;
1960 	struct ieee80211_frame *wh;
1961 	mblk_t *mp;
1962 	uint16_t len, rssi, mrssi, agc;
1963 	int16_t t;
1964 	uint32_t ants, i;
1965 	struct iwk_rx_non_cfg_phy *phyinfo;
1966 
1967 	/* assuming non-11n frames here; 11n support is deferred to phase II */
1968 	stat = (iwk_rx_phy_res_t *)(desc + 1);
1969 	if (stat->cfg_phy_cnt > 20) {
1970 		return;
1971 	}
1972 
1973 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
1974 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
1975 	mrssi = 0;
1976 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
1977 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
1978 	for (i = 0; i < 3; i++) {
1979 		if (ants & (1 << i))
1980 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
1981 	}
1982 	t = mrssi - agc - 44; /* t is the dBM value */
1983 	/*
1984 	 * convert dBm to a percentage signal level (empirical conversion)
1985 	 */
1986 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
1987 	    (75 * 75);
1988 	if (rssi > 100)
1989 		rssi = 100;
1990 	if (rssi < 1)
1991 		rssi = 1;
1992 	len = stat->byte_count;
1993 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
1994 
1995 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
1996 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
1997 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
1998 	    len, stat->rate.r.s.rate, stat->channel,
1999 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2000 	    stat->cfg_phy_cnt, LE_32(*tail)));
2001 
2002 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2003 		IWK_DBG((IWK_DEBUG_RX, "rx frame invalid length\n"));
2004 		return;
2005 	}
2006 
2007 	/*
2008 	 * discard Rx frames with bad CRC
2009 	 */
2010 	if ((LE_32(*tail) &
2011 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2012 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2013 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2014 		    LE_32(*tail)));
2015 		sc->sc_rx_err++;
2016 		return;
2017 	}
2018 
2019 	wh = (struct ieee80211_frame *)
2020 	    ((uint8_t *)(stat + 1) + stat->cfg_phy_cnt);
2021 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2022 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2023 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2024 		    sc->sc_assoc_id));
2025 	}
2026 #ifdef DEBUG
2027 	if (iwk_dbg_flags & IWK_DEBUG_RX)
2028 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2029 #endif
2030 	in = ieee80211_find_rxnode(ic, wh);
2031 	mp = allocb(len, BPRI_MED);
2032 	if (mp) {
2033 		(void) memcpy(mp->b_wptr, wh, len);
2034 		mp->b_wptr += len;
2035 
2036 		/* send the frame to the 802.11 layer */
2037 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2038 	} else {
2039 		sc->sc_rx_nobuf++;
2040 		IWK_DBG((IWK_DEBUG_RX,
2041 		    "iwk_rx_intr(): alloc rx buf failed\n"));
2042 	}
2043 	/* release node reference */
2044 	ieee80211_free_node(in);
2045 }
2046 
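/*
 * Handle a REPLY_TX (tx complete) notification: update the AMRR rate
 * control counters and retry statistics, clear the tx watchdog timer,
 * credit the tx ring, and restart MAC-layer transmission if it had been
 * flow-controlled for lack of tx buffers.
 */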
2047 /*ARGSUSED*/
2048 static void
2049 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2050 {
2051 	ieee80211com_t *ic = &sc->sc_ic;
2052 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2053 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2054 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2055 
2056 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2057 	    " retries=%d frame_count=%x nkill=%d "
2058 	    "rate=%x duration=%d status=%x\n",
2059 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2060 	    stat->bt_kill_count, stat->rate.r.s.rate,
2061 	    LE_32(stat->duration), LE_32(stat->status)));
2062 
2063 	amrr->txcnt++;
2064 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2065 	if (stat->ntries > 0) {
2066 		amrr->retrycnt++;
2067 		sc->sc_tx_retries++;
2068 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2069 		    sc->sc_tx_retries));
2070 	}
2071 
2072 	sc->sc_tx_timer = 0;
2073 
2074 	mutex_enter(&sc->sc_tx_lock);
2075 	ring->queued--;
2076 	if (ring->queued < 0)
2077 		ring->queued = 0;
2078 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2079 		sc->sc_need_reschedule = 0;
2080 		mutex_exit(&sc->sc_tx_lock);
2081 		mac_tx_update(ic->ic_mach);
2082 		mutex_enter(&sc->sc_tx_lock);
2083 	}
2084 	mutex_exit(&sc->sc_tx_lock);
2085 }
2086 
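/*
 * A notification arrived on the command queue (qid 4): mark the pending
 * synchronous command as done and wake the thread waiting in iwk_cmd().
 */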
2087 static void
2088 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2089 {
2090 	if ((desc->hdr.qid & 7) != 4) {
2091 		return;
2092 	}
2093 	mutex_enter(&sc->sc_glock);
2094 	sc->sc_flags |= IWK_F_CMD_DONE;
2095 	cv_signal(&sc->sc_cmd_cv);
2096 	mutex_exit(&sc->sc_glock);
2097 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2098 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2099 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2100 	    desc->hdr.type));
2101 }
2102 
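/*
 * Handle the REPLY_ALIVE notification from the microcode. For the
 * Initialize image, save the returned calibration data and point the
 * BSM at the runtime image so it gets loaded next. For the runtime
 * image, initialize the tx scheduler (SCD) data in SRAM, activate the
 * tx queues and FIFO channels, then set IWK_F_FW_INIT and signal
 * sc_fw_cv to release the thread waiting for firmware bring-up.
 */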
2103 static void
2104 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2105 {
2106 	uint32_t base, i;
2107 	struct iwk_alive_resp *ar =
2108 	    (struct iwk_alive_resp *)(desc + 1);
2109 
2110 	/* the microcontroller is ready */
2111 	IWK_DBG((IWK_DEBUG_FW,
2112 	    "microcode alive notification minor: %x major: %x type:"
2113 	    " %x subtype: %x\n",
2114 	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2115 
2116 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2117 		IWK_DBG((IWK_DEBUG_FW,
2118 		    "microcontroller initialization failed\n"));
2119 	}
2120 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2121 		IWK_DBG((IWK_DEBUG_FW,
2122 		    "initialization alive received.\n"));
2123 		(void) memcpy(&sc->sc_card_alive_init, ar,
2124 		    sizeof (struct iwk_init_alive_resp));
2125 		/* XXX get temperature */
2126 		iwk_mac_access_enter(sc);
2127 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2128 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2129 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2130 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2131 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2132 		    sc->sc_dma_fw_data.cookie.dmac_size);
2133 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2134 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2135 		iwk_mac_access_exit(sc);
2136 	} else {
2137 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2138 		(void) memcpy(&sc->sc_card_alive_run, ar,
2139 		    sizeof (struct iwk_alive_resp));
2140 
2141 		/*
2142 		 * Init SCD related registers to make Tx work. XXX
2143 		 */
2144 		iwk_mac_access_enter(sc);
2145 
2146 		/* read sram address of data base */
2147 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2148 
2149 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2150 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2151 		    i < 128; i += 4)
2152 			iwk_mem_write(sc, base + i, 0);
2153 
2154 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2155 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2156 		    i < 256; i += 4)
2157 			iwk_mem_write(sc, base + i, 0);
2158 
2159 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2160 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2161 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2162 			iwk_mem_write(sc, base + i, 0);
2163 
2164 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2165 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2166 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2167 
2168 		/* initiate the tx queues */
2169 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
2170 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2171 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2172 			iwk_mem_write(sc, sc->sc_scd_base +
2173 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2174 			    (SCD_WIN_SIZE & 0x7f));
2175 			iwk_mem_write(sc, sc->sc_scd_base +
2176 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2177 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2178 		}
2179 		/* enable interrupts for all tx queues */
2180 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2181 		    (1 << IWK_NUM_QUEUES) - 1);
2182 		/* enable each channel 0-7 */
2183 		iwk_reg_write(sc, SCD_TXFACT,
2184 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2185 		/*
2186 		 * queue 0-7 maps to FIFO 0-7 and
2187 		 * all queues work under FIFO mode (non-scheduler-ack)
2188 		 */
2189 		for (i = 0; i < 7; i++) {
2190 			iwk_reg_write(sc,
2191 			    SCD_QUEUE_STATUS_BITS(i),
2192 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2193 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2194 			    SCD_QUEUE_STTS_REG_MSK);
2195 		}
2196 		iwk_mac_access_exit(sc);
2197 
2198 		sc->sc_flags |= IWK_F_FW_INIT;
2199 		cv_signal(&sc->sc_fw_cv);
2200 	}
2201 
2202 }
2203 
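/*
 * Soft interrupt handler: drain the shared rx ring. The firmware's
 * write index is read from the shared page; every descriptor up to that
 * index is dispatched by notification type (rx frames, tx completions,
 * alive, card state, scan and statistics notifications), and command
 * responses wake the caller blocked in iwk_cmd(). Finally the read
 * pointer is handed back to the firmware and interrupts are re-enabled.
 */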
2204 static uint_t
2205 /* LINTED: argument unused in function: unused */
2206 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2207 {
2208 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2209 	ieee80211com_t *ic = &sc->sc_ic;
2210 	iwk_rx_desc_t *desc;
2211 	iwk_rx_data_t *data;
2212 	uint32_t index;
2213 
2214 	mutex_enter(&sc->sc_glock);
2215 	if (sc->sc_rx_softint_pending != 1) {
2216 		mutex_exit(&sc->sc_glock);
2217 		return (DDI_INTR_UNCLAIMED);
2218 	}
2219 	/* disable interrupts */
2220 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2221 	mutex_exit(&sc->sc_glock);
2222 
2223 	/*
2224 	 * The firmware has advanced the write index of the rx queue;
2225 	 * fetch it and process every entry up to that index.
2226 	 */
2227 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2228 
2229 	while (sc->sc_rxq.cur != index) {
2230 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2231 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2232 
2233 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2234 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2235 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2236 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2237 
2238 		/* anything other than an rx/tx/calibration notification completes a pending command */
2239 		if (!(desc->hdr.qid & 0x80) &&
2240 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2241 		    (desc->hdr.type != REPLY_TX) &&
2242 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2243 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2244 		    (desc->hdr.type != SENSITIVITY_CMD))
2245 			iwk_cmd_intr(sc, desc);
2246 
2247 		switch (desc->hdr.type) {
2248 		case REPLY_4965_RX:
2249 			iwk_rx_intr(sc, desc, data);
2250 			break;
2251 
2252 		case REPLY_TX:
2253 			iwk_tx_intr(sc, desc, data);
2254 			break;
2255 
2256 		case REPLY_ALIVE:
2257 			iwk_ucode_alive(sc, desc);
2258 			break;
2259 
2260 		case CARD_STATE_NOTIFICATION:
2261 		{
2262 			uint32_t *status = (uint32_t *)(desc + 1);
2263 
2264 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2265 			    LE_32(*status)));
2266 
2267 			if (LE_32(*status) & 1) {
2268 				/*
2269 				 * The RF kill switch has been turned to the
2270 				 * OFF position. Treat this as a hardware
2271 				 * error; iwk_thread() recovers the device
2272 				 * once the switch is turned back ON.
2273 				 */
2274 				cmn_err(CE_NOTE,
2275 				    "iwk_rx_softintr(): "
2276 				    "Radio transmitter is off\n");
2277 				sc->sc_ostate = sc->sc_ic.ic_state;
2278 				ieee80211_new_state(&sc->sc_ic,
2279 				    IEEE80211_S_INIT, -1);
2280 				sc->sc_flags |=
2281 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2282 			}
2283 			break;
2284 		}
2285 		case SCAN_START_NOTIFICATION:
2286 		{
2287 			iwk_start_scan_t *scan =
2288 			    (iwk_start_scan_t *)(desc + 1);
2289 
2290 			IWK_DBG((IWK_DEBUG_SCAN,
2291 			    "scanning channel %d status %x\n",
2292 			    scan->chan, LE_32(scan->status)));
2293 
2294 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2295 			break;
2296 		}
2297 		case SCAN_COMPLETE_NOTIFICATION:
2298 		{
2299 			iwk_stop_scan_t *scan =
2300 			    (iwk_stop_scan_t *)(desc + 1);
2301 
2302 			IWK_DBG((IWK_DEBUG_SCAN,
2303 			    "completed channel %d (burst of %d) status %02x\n",
2304 			    scan->chan, scan->nchan, scan->status));
2305 
2306 			sc->sc_scan_pending++;
2307 			break;
2308 		}
2309 		case STATISTICS_NOTIFICATION:
2310 			/* handle statistics notification */
2311 			iwk_statistics_notify(sc, desc);
2312 			break;
2313 		}
2314 
2315 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2316 	}
2317 
2318 	/*
2319 	 * The driver has processed the received entries; tell the firmware
2320 	 * how far the rx queue read pointer has advanced.
2321 	 */
2322 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2323 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2324 
2325 	mutex_enter(&sc->sc_glock);
2326 	/* re-enable interrupts */
2327 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2328 	sc->sc_rx_softint_pending = 0;
2329 	mutex_exit(&sc->sc_glock);
2330 
2331 	return (DDI_INTR_CLAIMED);
2332 }
2333 
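/*
 * Hard interrupt handler: read and acknowledge the CSR and FH interrupt
 * status, start recovery from fatal firmware/hardware errors, note
 * RF-kill and alive events, and schedule the rx soft interrupt when
 * rx/tx notifications are pending.
 */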
2334 static uint_t
2335 /* LINTED: argument unused in function: unused */
2336 iwk_intr(caddr_t arg, caddr_t unused)
2337 {
2338 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2339 	uint32_t r, rfh;
2340 
2341 	mutex_enter(&sc->sc_glock);
2342 
2343 	if (sc->sc_flags & IWK_F_SUSPEND) {
2344 		mutex_exit(&sc->sc_glock);
2345 		return (DDI_INTR_UNCLAIMED);
2346 	}
2347 
2348 	r = IWK_READ(sc, CSR_INT);
2349 	if (r == 0 || r == 0xffffffff) {
2350 		mutex_exit(&sc->sc_glock);
2351 		return (DDI_INTR_UNCLAIMED);
2352 	}
2353 
2354 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2355 
2356 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2357 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2358 	/* disable interrupts */
2359 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2360 	/* ack interrupts */
2361 	IWK_WRITE(sc, CSR_INT, r);
2362 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2363 
2364 	if (sc->sc_soft_hdl == NULL) {
2365 		mutex_exit(&sc->sc_glock);
2366 		return (DDI_INTR_CLAIMED);
2367 	}
2368 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2369 		cmn_err(CE_WARN, "fatal firmware error\n");
2370 		mutex_exit(&sc->sc_glock);
2371 #ifdef DEBUG
2372 		/* dump event and error logs to dmesg */
2373 		iwk_write_error_log(sc);
2374 		iwk_write_event_log(sc);
2375 #endif /* DEBUG */
2376 		iwk_stop(sc);
2377 		sc->sc_ostate = sc->sc_ic.ic_state;
2378 		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2379 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2380 		return (DDI_INTR_CLAIMED);
2381 	}
2382 
2383 	if (r & BIT_INT_RF_KILL) {
2384 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2385 	}
2386 
2387 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2388 	    (rfh & FH_INT_RX_MASK)) {
2389 		sc->sc_rx_softint_pending = 1;
2390 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2391 	}
2392 
2393 	if (r & BIT_INT_ALIVE)	{
2394 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2395 	}
2396 
2397 	/* re-enable interrupts */
2398 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2399 	mutex_exit(&sc->sc_glock);
2400 
2401 	return (DDI_INTR_CLAIMED);
2402 }
2403 
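/*
 * Map a net80211 rate (in 0.5 Mb/s units) to the 4965 PLCP rate code
 * used in tx commands, as encoded in the switch below:
 *   CCK:  2 (1M) -> 0x0a, 4 (2M) -> 0x14, 11 (5.5M) -> 0x37, 22 (11M) -> 0x6e
 *   OFDM: 12 (6M) -> 0x0d, 18 (9M) -> 0x0f, 24 (12M) -> 0x05, 36 (18M) -> 0x07,
 *         48 (24M) -> 0x09, 72 (36M) -> 0x0b, 96 (48M) -> 0x01, 108 (54M) -> 0x03
 * Unknown rates map to 0.
 */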
2404 static uint8_t
2405 iwk_rate_to_plcp(int rate)
2406 {
2407 	uint8_t ret;
2408 
2409 	switch (rate) {
2410 	/* CCK rates */
2411 	case 2:
2412 		ret = 0xa;
2413 		break;
2414 	case 4:
2415 		ret = 0x14;
2416 		break;
2417 	case 11:
2418 		ret = 0x37;
2419 		break;
2420 	case 22:
2421 		ret = 0x6e;
2422 		break;
2423 	/* OFDM rates */
2424 	case 12:
2425 		ret = 0xd;
2426 		break;
2427 	case 18:
2428 		ret = 0xf;
2429 		break;
2430 	case 24:
2431 		ret = 0x5;
2432 		break;
2433 	case 36:
2434 		ret = 0x7;
2435 		break;
2436 	case 48:
2437 		ret = 0x9;
2438 		break;
2439 	case 72:
2440 		ret = 0xb;
2441 		break;
2442 	case 96:
2443 		ret = 0x1;
2444 		break;
2445 	case 108:
2446 		ret = 0x3;
2447 		break;
2448 	default:
2449 		ret = 0;
2450 		break;
2451 	}
2452 	return (ret);
2453 }
2454 
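/*
 * GLDv3 transmit entry point. Frames are dropped while the device is
 * suspended or the interface is not in RUN state. Otherwise the mblk
 * chain is walked and each frame is handed to iwk_send(); whatever
 * could not be queued is returned so that the MAC layer retries it
 * after mac_tx_update().
 */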
2455 static mblk_t *
2456 iwk_m_tx(void *arg, mblk_t *mp)
2457 {
2458 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2459 	ieee80211com_t	*ic = &sc->sc_ic;
2460 	mblk_t			*next;
2461 
2462 	if (sc->sc_flags & IWK_F_SUSPEND) {
2463 		freemsgchain(mp);
2464 		return (NULL);
2465 	}
2466 
2467 	if (ic->ic_state != IEEE80211_S_RUN) {
2468 		freemsgchain(mp);
2469 		return (NULL);
2470 	}
2471 
2472 	while (mp != NULL) {
2473 		next = mp->b_next;
2474 		mp->b_next = NULL;
2475 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2476 			mp->b_next = next;
2477 			break;
2478 		}
2479 		mp = next;
2480 	}
2481 	return (mp);
2482 }
2483 
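/*
 * Queue one frame on tx ring 0. The message is flattened into a single
 * buffer and encapsulated by net80211 (with crypto headers added for
 * protected frames), then described to the firmware as a tx command:
 * the first TFD segment holds the iwk_tx_cmd_t plus the 802.11 header
 * in the command area, the second points at the DMA buffer holding the
 * rest of the frame. The byte-count table entry is filled in and the
 * queue write pointer advanced.
 */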
2484 /* ARGSUSED */
2485 static int
2486 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2487 {
2488 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2489 	iwk_tx_ring_t *ring;
2490 	iwk_tx_desc_t *desc;
2491 	iwk_tx_data_t *data;
2492 	iwk_cmd_t *cmd;
2493 	iwk_tx_cmd_t *tx;
2494 	ieee80211_node_t *in;
2495 	struct ieee80211_frame *wh;
2496 	struct ieee80211_key *k = NULL;
2497 	mblk_t *m, *m0;
2498 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2499 	uint16_t masks = 0;
2500 
2501 	ring = &sc->sc_txq[0];
2502 	data = &ring->data[ring->cur];
2503 	desc = data->desc;
2504 	cmd = data->cmd;
2505 	bzero(desc, sizeof (*desc));
2506 	bzero(cmd, sizeof (*cmd));
2507 
2508 	mutex_enter(&sc->sc_tx_lock);
2509 	if (sc->sc_flags & IWK_F_SUSPEND) {
2510 		mutex_exit(&sc->sc_tx_lock);
2511 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2512 		    IEEE80211_FC0_TYPE_DATA) {
2513 			freemsg(mp);
2514 		}
2515 		err = IWK_FAIL;
2516 		goto exit;
2517 	}
2518 
2519 	if (ring->queued > ring->count - 64) {
2520 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2521 		sc->sc_need_reschedule = 1;
2522 		mutex_exit(&sc->sc_tx_lock);
2523 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2524 		    IEEE80211_FC0_TYPE_DATA) {
2525 			freemsg(mp);
2526 		}
2527 		sc->sc_tx_nobuf++;
2528 		err = IWK_FAIL;
2529 		goto exit;
2530 	}
2531 	mutex_exit(&sc->sc_tx_lock);
2532 
2533 	hdrlen = sizeof (struct ieee80211_frame);
2534 
2535 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2536 	if (m == NULL) { /* cannot alloc buf, drop this packet */
2537 		cmn_err(CE_WARN,
2538 		    "iwk_send(): failed to allocate msgbuf\n");
2539 		freemsg(mp);
2540 		err = IWK_SUCCESS;
2541 		goto exit;
2542 	}
2543 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2544 		mblen = MBLKL(m0);
2545 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2546 		off += mblen;
2547 	}
2548 	m->b_wptr += off;
2549 	freemsg(mp);
2550 
2551 	wh = (struct ieee80211_frame *)m->b_rptr;
2552 
2553 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2554 	if (in == NULL) {
2555 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2556 		freemsg(m);
2557 		sc->sc_tx_err++;
2558 		err = IWK_SUCCESS;
2559 		goto exit;
2560 	}
2561 	(void) ieee80211_encap(ic, m, in);
2562 
2563 	cmd->hdr.type = REPLY_TX;
2564 	cmd->hdr.flags = 0;
2565 	cmd->hdr.qid = ring->qid;
2566 	cmd->hdr.idx = ring->cur;
2567 
2568 	tx = (iwk_tx_cmd_t *)cmd->data;
2569 	tx->tx_flags = 0;
2570 
2571 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2572 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2573 	} else {
2574 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2575 	}
2576 
2577 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2578 		k = ieee80211_crypto_encap(ic, m);
2579 		if (k == NULL) {
2580 			freemsg(m);
2581 			sc->sc_tx_err++;
2582 			err = IWK_SUCCESS;
2583 			goto exit;
2584 		}
2585 
2586 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2587 			tx->sec_ctl = 2; /* for CCMP */
2588 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2589 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2590 		}
2591 
2592 		/* packet header may have moved, reset our local pointer */
2593 		wh = (struct ieee80211_frame *)m->b_rptr;
2594 	}
2595 
2596 	len = msgdsize(m);
2597 
2598 #ifdef DEBUG
2599 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2600 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2601 #endif
2602 
2603 	/* pickup a rate */
2604 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2605 	    IEEE80211_FC0_TYPE_MGT) {
2606 		/* mgmt frames are sent at 1M */
2607 		rate = in->in_rates.ir_rates[0];
2608 	} else {
2609 		 * Rate selection is done here in software; hardware rate
2610 		 * scaling may be used later, perhaps as in the following.
2611 		 * For a management frame:
2612 		 * maybe like the following, for management frame:
2613 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2614 		 * for data frame:
2615 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2616 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2617 		 * tx->initial_rate_index = 1;
2618 		 *
2619 		 * For now the tx rate is chosen by the driver: the fixed
2620 		 * rate if one is configured, otherwise the current
2621 		 * rate-control selection (in_txrate).
2621 		 */
2622 
2623 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2624 			rate = ic->ic_fixed_rate;
2625 		} else {
2626 			rate = in->in_rates.ir_rates[in->in_txrate];
2627 		}
2628 	}
2629 	rate &= IEEE80211_RATE_VAL;
2630 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2631 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2632 
2633 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2634 
2635 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2636 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2637 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2638 
2639 	/* retrieve destination node's id */
2640 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2641 		tx->sta_id = IWK_BROADCAST_ID;
2642 	} else {
2643 		if (ic->ic_opmode != IEEE80211_M_IBSS)
2644 			tx->sta_id = IWK_AP_ID;
2645 	}
2646 
2647 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2648 	    IEEE80211_FC0_TYPE_MGT) {
2649 		/* tell h/w to set timestamp in probe responses */
2650 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2651 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2652 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2653 
2654 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2655 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2656 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2657 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2658 			tx->timeout.pm_frame_timeout = 3;
2659 		else
2660 			tx->timeout.pm_frame_timeout = 2;
2661 	} else
2662 		tx->timeout.pm_frame_timeout = 0;
2663 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2664 		masks |= RATE_MCS_CCK_MSK;
2665 
2666 	masks |= RATE_MCS_ANT_B_MSK;
2667 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2668 
2669 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2670 	    tx->tx_flags));
2671 
2672 	tx->rts_retry_limit = 60;
2673 	tx->data_retry_limit = 15;
2674 
2675 	tx->stop_time.life_time  = LE_32(0xffffffff);
2676 
2677 	tx->len = LE_16(len);
2678 
2679 	tx->dram_lsb_ptr =
2680 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2681 	tx->dram_msb_ptr = 0;
2682 	tx->driver_txop = 0;
2683 	tx->next_frame_len = 0;
2684 
2685 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2686 	m->b_rptr += hdrlen;
2687 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2688 
2689 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2690 	    ring->qid, ring->cur, len));
2691 
2692 	/*
2693 	 * first segment includes the tx cmd plus the 802.11 header,
2694 	 * the second includes the remaining of the 802.11 frame.
2695 	 */
2696 	desc->val0 = LE_32(2 << 24);
2697 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2698 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2699 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2700 	desc->pa[0].val2 =
2701 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2702 	    ((len - hdrlen) << 20);
2703 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2704 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2705 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2706 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2707 
2708 	mutex_enter(&sc->sc_tx_lock);
2709 	ring->queued++;
2710 	mutex_exit(&sc->sc_tx_lock);
2711 
2712 	/* kick ring */
2713 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2714 	    tfd_offset[ring->cur].val = 8 + len;
2715 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2716 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2717 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2718 	}
2719 
2720 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2721 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2722 
2723 	ring->cur = (ring->cur + 1) % ring->count;
2724 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2725 	freemsg(m);
2726 	/* release node reference */
2727 	ieee80211_free_node(in);
2728 
2729 	ic->ic_stats.is_tx_bytes += len;
2730 	ic->ic_stats.is_tx_frags++;
2731 
2732 	if (sc->sc_tx_timer == 0)
2733 		sc->sc_tx_timer = 10;
2734 exit:
2735 	return (err);
2736 }
2737 
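/*
 * GLDv3 ioctl entry point. Requests are rejected while the device is
 * suspended or recovering from a hardware error; otherwise they are
 * passed to ieee80211_ioctl(). An ENETRESET return (e.g. a new desired
 * ESSID) restarts the device and kicks off a new scan.
 */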
2738 static void
2739 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2740 {
2741 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2742 	ieee80211com_t	*ic = &sc->sc_ic;
2743 	int		err;
2744 
2745 	mutex_enter(&sc->sc_glock);
2746 	if (sc->sc_flags & (IWK_F_SUSPEND | IWK_F_HW_ERR_RECOVER)) {
2747 		miocnak(wq, mp, 0, ENXIO);
2748 		mutex_exit(&sc->sc_glock);
2749 		return;
2750 	}
2751 	mutex_exit(&sc->sc_glock);
2752 
2753 	err = ieee80211_ioctl(ic, wq, mp);
2754 
2755 	if (err == ENETRESET) {
2756 		/*
2757 		 * Special handling for hidden-AP connections.
2758 		 * In any case, make sure only one scan is issued in
2759 		 * the driver for a single 'connect' command: when
2760 		 * connecting to a hidden AP, the probe request is only
2761 		 * sent over the air once the desired ESSID of the
2762 		 * target AP is known.
2763 		 */
2764 		if (ic->ic_des_esslen) {
2765 			if (sc->sc_flags & IWK_F_RUNNING) {
2766 				iwk_m_stop(sc);
2767 				(void) iwk_m_start(sc);
2768 				(void) ieee80211_new_state(ic,
2769 				    IEEE80211_S_SCAN, -1);
2770 			}
2771 		}
2772 	}
2773 }
2774 
2775 /*
2776  * callback functions for set/get properties
2777  */
2778 /* ARGSUSED */
2779 static int
2780 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2781     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
2782 {
2783 	int		err = 0;
2784 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2785 
2786 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
2787 	    pr_flags, wldp_length, wldp_buf, perm);
2788 
2789 	return (err);
2790 }
2791 static int
2792 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2793     uint_t wldp_length, const void *wldp_buf)
2794 {
2795 	int		err;
2796 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2797 	ieee80211com_t	*ic = &sc->sc_ic;
2798 
2799 	mutex_enter(&sc->sc_glock);
2800 	if (sc->sc_flags & (IWK_F_SUSPEND | IWK_F_HW_ERR_RECOVER)) {
2801 		mutex_exit(&sc->sc_glock);
2802 		return (ENXIO);
2803 	}
2804 	mutex_exit(&sc->sc_glock);
2805 
2806 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
2807 	    wldp_buf);
2808 
2809 	if (err == ENETRESET) {
2810 		if (ic->ic_des_esslen) {
2811 			if (sc->sc_flags & IWK_F_RUNNING) {
2812 				iwk_m_stop(sc);
2813 				(void) iwk_m_start(sc);
2814 				(void) ieee80211_new_state(ic,
2815 				    IEEE80211_S_SCAN, -1);
2816 			}
2817 		}
2818 		err = 0;
2819 	}
2820 
2821 	return (err);
2822 }
2823 
2824 /*ARGSUSED*/
2825 static int
2826 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
2827 {
2828 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2829 	ieee80211com_t	*ic = &sc->sc_ic;
2830 	ieee80211_node_t *in;
2831 
2832 	mutex_enter(&sc->sc_glock);
2833 	switch (stat) {
2834 	case MAC_STAT_IFSPEED:
2835 		in = ic->ic_bss;
2836 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
2837 		    IEEE80211_RATE(in->in_txrate) :
2838 		    ic->ic_fixed_rate) / 2 * 1000000;
2839 		break;
2840 	case MAC_STAT_NOXMTBUF:
2841 		*val = sc->sc_tx_nobuf;
2842 		break;
2843 	case MAC_STAT_NORCVBUF:
2844 		*val = sc->sc_rx_nobuf;
2845 		break;
2846 	case MAC_STAT_IERRORS:
2847 		*val = sc->sc_rx_err;
2848 		break;
2849 	case MAC_STAT_RBYTES:
2850 		*val = ic->ic_stats.is_rx_bytes;
2851 		break;
2852 	case MAC_STAT_IPACKETS:
2853 		*val = ic->ic_stats.is_rx_frags;
2854 		break;
2855 	case MAC_STAT_OBYTES:
2856 		*val = ic->ic_stats.is_tx_bytes;
2857 		break;
2858 	case MAC_STAT_OPACKETS:
2859 		*val = ic->ic_stats.is_tx_frags;
2860 		break;
2861 	case MAC_STAT_OERRORS:
2862 	case WIFI_STAT_TX_FAILED:
2863 		*val = sc->sc_tx_err;
2864 		break;
2865 	case WIFI_STAT_TX_RETRANS:
2866 		*val = sc->sc_tx_retries;
2867 		break;
2868 	case WIFI_STAT_FCS_ERRORS:
2869 	case WIFI_STAT_WEP_ERRORS:
2870 	case WIFI_STAT_TX_FRAGS:
2871 	case WIFI_STAT_MCAST_TX:
2872 	case WIFI_STAT_RTS_SUCCESS:
2873 	case WIFI_STAT_RTS_FAILURE:
2874 	case WIFI_STAT_ACK_FAILURE:
2875 	case WIFI_STAT_RX_FRAGS:
2876 	case WIFI_STAT_MCAST_RX:
2877 	case WIFI_STAT_RX_DUPS:
2878 		mutex_exit(&sc->sc_glock);
2879 		return (ieee80211_stat(ic, stat, val));
2880 	default:
2881 		mutex_exit(&sc->sc_glock);
2882 		return (ENOTSUP);
2883 	}
2884 	mutex_exit(&sc->sc_glock);
2885 
2886 	return (IWK_SUCCESS);
2887 
2888 }
2889 
2890 static int
2891 iwk_m_start(void *arg)
2892 {
2893 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2894 	ieee80211com_t	*ic = &sc->sc_ic;
2895 	int err;
2896 
2897 	err = iwk_init(sc);
2898 
2899 	if (err != IWK_SUCCESS) {
2900 		/*
2901 		 * Hardware initialization failed (e.g. the RF switch is
2902 		 * OFF). Return success anyway so that 'plumb' succeeds;
2903 		 * iwk_thread() retries the initialization in the background.
2904 		 */
2905 		cmn_err(CE_WARN, "iwk_m_start(): failed to initialize "
2906 		    "hardware\n");
2907 		mutex_enter(&sc->sc_glock);
2908 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2909 		mutex_exit(&sc->sc_glock);
2910 		return (IWK_SUCCESS);
2911 	}
2912 
2913 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2914 
2915 	mutex_enter(&sc->sc_glock);
2916 	sc->sc_flags |= IWK_F_RUNNING;
2917 	mutex_exit(&sc->sc_glock);
2918 
2919 	return (IWK_SUCCESS);
2920 }
2921 
2922 static void
2923 iwk_m_stop(void *arg)
2924 {
2925 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2926 	ieee80211com_t	*ic = &sc->sc_ic;
2927 
2928 	iwk_stop(sc);
2929 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2930 	mutex_enter(&sc->sc_mt_lock);
2931 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2932 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
2933 	mutex_exit(&sc->sc_mt_lock);
2934 	mutex_enter(&sc->sc_glock);
2935 	sc->sc_flags &= ~IWK_F_RUNNING;
2936 	sc->sc_flags &= ~IWK_F_SCANNING;
2937 	mutex_exit(&sc->sc_glock);
2938 }
2939 
2940 /*ARGSUSED*/
2941 static int
2942 iwk_m_unicst(void *arg, const uint8_t *macaddr)
2943 {
2944 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2945 	ieee80211com_t	*ic = &sc->sc_ic;
2946 	int err;
2947 
2948 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
2949 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
2950 		mutex_enter(&sc->sc_glock);
2951 		err = iwk_config(sc);
2952 		mutex_exit(&sc->sc_glock);
2953 		if (err != IWK_SUCCESS) {
2954 			cmn_err(CE_WARN,
2955 			    "iwk_m_unicst(): "
2956 			    "failed to configure device\n");
2957 			goto fail;
2958 		}
2959 	}
2960 	return (IWK_SUCCESS);
2961 fail:
2962 	return (err);
2963 }
2964 
2965 /*ARGSUSED*/
2966 static int
2967 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
2968 {
2969 	return (IWK_SUCCESS);
2970 }
2971 
2972 /*ARGSUSED*/
2973 static int
2974 iwk_m_promisc(void *arg, boolean_t on)
2975 {
2976 	return (IWK_SUCCESS);
2977 }
2978 
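/*
 * Driver monitor thread. It polls the hardware RF-kill state, recovers
 * from fatal hardware errors by re-initializing the device, performs
 * the deferred state change for lazy resume, paces scanning while probe
 * responses are pending, drives AMRR rate control roughly every 500 ms,
 * and watches the tx timer to detect stalled transmissions.
 */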
2979 static void
2980 iwk_thread(iwk_sc_t *sc)
2981 {
2982 	ieee80211com_t	*ic = &sc->sc_ic;
2983 	clock_t clk;
2984 	int times = 0, err, n = 0, timeout = 0;
2985 	uint32_t tmp;
2986 
2987 	mutex_enter(&sc->sc_mt_lock);
2988 	while (sc->sc_mf_thread_switch) {
2989 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
2990 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
2991 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
2992 		} else {
2993 			sc->sc_flags |= IWK_F_RADIO_OFF;
2994 		}
2995 		/*
2996 		 * If in SUSPEND or the RF is OFF, do nothing
2997 		 */
2998 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
2999 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
3000 			mutex_exit(&sc->sc_mt_lock);
3001 			delay(drv_usectohz(100000));
3002 			mutex_enter(&sc->sc_mt_lock);
3003 			continue;
3004 		}
3005 
3006 		/*
3007 		 * recover from a fatal hardware error
3008 		 */
3009 		if (ic->ic_mach &&
3010 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
3011 
3012 			IWK_DBG((IWK_DEBUG_FW,
3013 			    "iwk_thread(): "
3014 			    "try to recover fatal hw error: %d\n", times++));
3015 
3016 			iwk_stop(sc);
3017 
3018 			mutex_exit(&sc->sc_mt_lock);
3019 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3020 			delay(drv_usectohz(2000000 + n*500000));
3021 			mutex_enter(&sc->sc_mt_lock);
3022 
3023 			err = iwk_init(sc);
3024 			if (err != IWK_SUCCESS) {
3025 				n++;
3026 				if (n < 20)
3027 					continue;
3028 			}
3029 			n = 0;
3030 			if (!err)
3031 				sc->sc_flags |= IWK_F_RUNNING;
3032 			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3033 			mutex_exit(&sc->sc_mt_lock);
3034 			delay(drv_usectohz(2000000));
3035 			if (sc->sc_ostate != IEEE80211_S_INIT)
3036 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
3037 			mutex_enter(&sc->sc_mt_lock);
3038 		}
3039 
3040 		if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) {
3041 			IWK_DBG((IWK_DEBUG_RESUME,
3042 			    "iwk_thread(): "
3043 			    "lazy resume\n"));
3044 			sc->sc_flags &= ~IWK_F_LAZY_RESUME;
3045 			mutex_exit(&sc->sc_mt_lock);
3046 			/*
3047 			 * NB: under WPA mode, this call hangs (door problem?)
3048 			 * when called in iwk_attach() and iwk_detach() while
3049 			 * system is in the procedure of CPR. To be safe, let
3050 			 * the thread do this.
3051 			 */
3052 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3053 			mutex_enter(&sc->sc_mt_lock);
3054 		}
3055 
3056 		if (ic->ic_mach &&
3057 		    (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3058 			IWK_DBG((IWK_DEBUG_SCAN,
3059 			    "iwk_thread(): "
3060 			    "wait for probe response\n"));
3061 			sc->sc_scan_pending--;
3062 			mutex_exit(&sc->sc_mt_lock);
3063 			delay(drv_usectohz(200000));
3064 			if (sc->sc_flags & IWK_F_SCANNING)
3065 				ieee80211_next_scan(ic);
3066 			mutex_enter(&sc->sc_mt_lock);
3067 		}
3068 
3069 		/*
3070 		 * rate ctl
3071 		 */
3072 		if (ic->ic_mach &&
3073 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3074 			clk = ddi_get_lbolt();
3075 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
3076 				iwk_amrr_timeout(sc);
3077 			}
3078 		}
3079 
3080 		mutex_exit(&sc->sc_mt_lock);
3081 		delay(drv_usectohz(100000));
3082 		mutex_enter(&sc->sc_mt_lock);
3083 
3084 		if (sc->sc_tx_timer) {
3085 			timeout++;
3086 			if (timeout == 10) {
3087 				sc->sc_tx_timer--;
3088 				if (sc->sc_tx_timer == 0) {
3089 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3090 					sc->sc_ostate = IEEE80211_S_RUN;
3091 					IWK_DBG((IWK_DEBUG_FW,
3092 					    "iwk_thread(): try to recover from"
3093 					    " send failure\n"));
3094 				}
3095 				timeout = 0;
3096 			}
3097 		}
3098 
3099 	}
3100 	sc->sc_mf_thread = NULL;
3101 	cv_signal(&sc->sc_mt_cv);
3102 	mutex_exit(&sc->sc_mt_lock);
3103 }
3104 
3105 
3106 /*
3107  * Send a command to the firmware via the command tx ring. The caller
 * must hold sc_glock; when 'async' is zero, wait (up to two seconds) for
 * iwk_cmd_intr() to signal that the command completed.
3108  */
3109 static int
3110 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3111 {
3112 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3113 	iwk_tx_desc_t *desc;
3114 	iwk_cmd_t *cmd;
3115 	clock_t clk;
3116 
3117 	ASSERT(size <= sizeof (cmd->data));
3118 	ASSERT(mutex_owned(&sc->sc_glock));
3119 
3120 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3121 	desc = ring->data[ring->cur].desc;
3122 	cmd = ring->data[ring->cur].cmd;
3123 
3124 	cmd->hdr.type = (uint8_t)code;
3125 	cmd->hdr.flags = 0;
3126 	cmd->hdr.qid = ring->qid;
3127 	cmd->hdr.idx = ring->cur;
3128 	(void) memcpy(cmd->data, buf, size);
3129 	(void) memset(desc, 0, sizeof (*desc));
3130 
3131 	desc->val0 = LE_32(1 << 24);
3132 	desc->pa[0].tb1_addr =
3133 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3134 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3135 
3136 	/* kick cmd ring XXX */
3137 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3138 	    tfd_offset[ring->cur].val = 8;
3139 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3140 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3141 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3142 	}
3143 	ring->cur = (ring->cur + 1) % ring->count;
3144 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3145 
3146 	if (async)
3147 		return (IWK_SUCCESS);
3148 	else {
3149 		sc->sc_flags &= ~IWK_F_CMD_DONE;
3150 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3151 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3152 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3153 			    0)
3154 				break;
3155 		}
3156 		if (sc->sc_flags & IWK_F_CMD_DONE)
3157 			return (IWK_SUCCESS);
3158 		else
3159 			return (IWK_FAIL);
3160 	}
3161 }
3162 
3163 static void
3164 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3165 {
3166 	iwk_led_cmd_t led;
3167 
3168 	led.interval = LE_32(100000);	/* unit: 100ms */
3169 	led.id = id;
3170 	led.off = off;
3171 	led.on = on;
3172 
3173 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3174 }
3175 
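/*
 * Prepare the hardware for authentication/association with the target
 * AP: update the RXON configuration with the AP's BSSID, channel, basic
 * rates and slot/preamble flags, run tx power calibration for the new
 * channel, add the AP station entry, and program the link quality
 * (rate scaling) table for that station.
 */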
3176 static int
3177 iwk_hw_set_before_auth(iwk_sc_t *sc)
3178 {
3179 	ieee80211com_t *ic = &sc->sc_ic;
3180 	ieee80211_node_t *in = ic->ic_bss;
3181 	iwk_add_sta_t node;
3182 	iwk_link_quality_cmd_t link_quality;
3183 	struct ieee80211_rateset rs;
3184 	uint16_t masks = 0, rate;
3185 	int i, err;
3186 
3187 	/* update adapter's configuration according to the target AP's info */
3188 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3189 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
3190 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
3191 		sc->sc_config.cck_basic_rates  = 0x03;
3192 		sc->sc_config.ofdm_basic_rates = 0;
3193 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3194 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3195 		sc->sc_config.cck_basic_rates  = 0;
3196 		sc->sc_config.ofdm_basic_rates = 0x15;
3197 	} else { /* assume 802.11b/g */
3198 		sc->sc_config.cck_basic_rates  = 0x0f;
3199 		sc->sc_config.ofdm_basic_rates = 0xff;
3200 	}
3201 
3202 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3203 	    RXON_FLG_SHORT_SLOT_MSK);
3204 
3205 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3206 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3207 	else
3208 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3209 
3210 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3211 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3212 	else
3213 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3214 
3215 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3216 	    "filter_flags %x  cck %x ofdm %x"
3217 	    " bssid:%02x:%02x:%02x:%02x:%02x:%02x\n",
3218 	    sc->sc_config.chan, sc->sc_config.flags,
3219 	    sc->sc_config.filter_flags,
3220 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3221 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3222 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3223 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3224 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3225 	    sizeof (iwk_rxon_cmd_t), 1);
3226 	if (err != IWK_SUCCESS) {
3227 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3228 		    " failed to config chan%d\n",
3229 		    sc->sc_config.chan);
3230 		return (err);
3231 	}
3232 
3233 	/* obtain current temperature of chipset */
3234 	sc->sc_tempera = iwk_curr_tempera(sc);
3235 
3236 	/* run Tx power calibration to determine the DSP and radio gains */
3237 	err = iwk_tx_power_calibration(sc);
3238 	if (err) {
3239 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3240 		    " failed to set tx power table\n");
3241 		return (err);
3242 	}
3243 
3244 	/* add default AP node */
3245 	(void) memset(&node, 0, sizeof (node));
3246 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3247 	node.id = IWK_AP_ID;
3248 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3249 	if (err != IWK_SUCCESS) {
3250 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3251 		    "failed to add BSS node\n");
3252 		return (err);
3253 	}
3254 
3255 	/* configure the TX link quality (rate scaling) table */
3256 	(void) memset(&link_quality, 0, sizeof (link_quality));
3257 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3258 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3259 		if (i < rs.ir_nrates)
3260 			rate = rs.ir_rates[rs.ir_nrates - i];
3261 		else
3262 			rate = 2;
3263 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3264 			masks |= RATE_MCS_CCK_MSK;
3265 		masks |= RATE_MCS_ANT_B_MSK;
3266 		masks &= ~RATE_MCS_ANT_A_MSK;
3267 		link_quality.rate_n_flags[i] =
3268 		    iwk_rate_to_plcp(rate) | masks;
3269 	}
3270 
3271 	link_quality.general_params.single_stream_ant_msk = 2;
3272 	link_quality.general_params.dual_stream_ant_msk = 3;
3273 	link_quality.agg_params.agg_dis_start_th = 3;
3274 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3275 	link_quality.sta_id = IWK_AP_ID;
3276 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3277 	    sizeof (link_quality), 1);
3278 	if (err != IWK_SUCCESS) {
3279 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3280 		    "failed to config link quality table\n");
3281 		return (err);
3282 	}
3283 
3284 	return (IWK_SUCCESS);
3285 }
3286 
3287 /*
3288  * Assemble a scan command and send the scan request to the firmware.
3289  */
3290 static int
3291 iwk_scan(iwk_sc_t *sc)
3292 {
3293 	ieee80211com_t *ic = &sc->sc_ic;
3294 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3295 	iwk_tx_desc_t *desc;
3296 	iwk_tx_data_t *data;
3297 	iwk_cmd_t *cmd;
3298 	iwk_scan_hdr_t *hdr;
3299 	iwk_scan_chan_t *chan;
3300 	struct ieee80211_frame *wh;
3301 	ieee80211_node_t *in = ic->ic_bss;
3302 	uint8_t essid[IEEE80211_NWID_LEN+1];
3303 	struct ieee80211_rateset *rs;
3304 	enum ieee80211_phymode mode;
3305 	uint8_t *frm;
3306 	int i, pktlen, nrates;
3307 
3308 	data = &ring->data[ring->cur];
3309 	desc = data->desc;
3310 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3311 
3312 	cmd->hdr.type = REPLY_SCAN_CMD;
3313 	cmd->hdr.flags = 0;
3314 	cmd->hdr.qid = ring->qid;
3315 	cmd->hdr.idx = ring->cur | 0x40;
3316 
3317 	hdr = (iwk_scan_hdr_t *)cmd->data;
3318 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3319 	hdr->nchan = 1;
3320 	hdr->quiet_time = LE_16(50);
3321 	hdr->quiet_plcp_th = LE_16(1);
3322 
3323 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3324 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3325 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3326 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3327 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3328 
3329 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3330 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3331 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3332 	hdr->tx_cmd.tx_flags |= (0x200);
3333 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3334 	hdr->tx_cmd.rate.r.rate_n_flags |=
3335 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3336 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3337 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3338 
3339 	if (ic->ic_des_esslen) {
3340 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3341 		essid[ic->ic_des_esslen] = '\0';
3342 		IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3343 
3344 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3345 		    ic->ic_des_esslen);
3346 	} else {
3347 		bzero(hdr->direct_scan[0].ssid,
3348 		    sizeof (hdr->direct_scan[0].ssid));
3349 	}
3350 	/*
3351 	 * a probe request frame is required after the REPLY_SCAN_CMD
3352 	 */
3353 	wh = (struct ieee80211_frame *)(hdr + 1);
3354 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3355 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3356 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3357 	(void) memset(wh->i_addr1, 0xff, 6);
3358 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3359 	(void) memset(wh->i_addr3, 0xff, 6);
3360 	*(uint16_t *)&wh->i_dur[0] = 0;
3361 	*(uint16_t *)&wh->i_seq[0] = 0;
3362 
3363 	frm = (uint8_t *)(wh + 1);
3364 
3365 	/* essid IE */
3366 	if (in->in_esslen) {
3367 		bcopy(in->in_essid, essid, in->in_esslen);
3368 		essid[in->in_esslen] = '\0';
3369 		IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3370 		    essid));
3371 	}
3372 	*frm++ = IEEE80211_ELEMID_SSID;
3373 	*frm++ = in->in_esslen;
3374 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3375 	frm += in->in_esslen;
3376 
3377 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3378 	rs = &ic->ic_sup_rates[mode];
3379 
3380 	/* supported rates IE */
3381 	*frm++ = IEEE80211_ELEMID_RATES;
3382 	nrates = rs->ir_nrates;
3383 	if (nrates > IEEE80211_RATE_SIZE)
3384 		nrates = IEEE80211_RATE_SIZE;
3385 	*frm++ = (uint8_t)nrates;
3386 	(void) memcpy(frm, rs->ir_rates, nrates);
3387 	frm += nrates;
3388 
3389 	/* supported xrates IE */
3390 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3391 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3392 		*frm++ = IEEE80211_ELEMID_XRATES;
3393 		*frm++ = (uint8_t)nrates;
3394 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3395 		frm += nrates;
3396 	}
3397 
3398 	/* optional IE (usually for WPA) */
3399 	if (ic->ic_opt_ie != NULL) {
3400 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3401 		frm += ic->ic_opt_ie_len;
3402 	}
3403 
3404 	/* setup length of probe request */
3405 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3406 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3407 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3408 
3409 	/*
3410 	 * the attributes of the scan channels follow the probe
3411 	 * request frame.
3412 	 */
3413 	chan = (iwk_scan_chan_t *)frm;
3414 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3415 		if (ic->ic_des_esslen) {
3416 			chan->type = 3;
3417 		} else {
3418 			chan->type = 1;
3419 		}
3420 
3421 		chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3422 		chan->tpc.tx_gain = 0x3f;
3423 		chan->tpc.dsp_atten = 110;
3424 		chan->active_dwell = LE_16(50);
3425 		chan->passive_dwell = LE_16(120);
3426 
3427 		frm += sizeof (iwk_scan_chan_t);
3428 	}
3429 
3430 	pktlen = _PTRDIFF(frm, cmd);
3431 
3432 	(void) memset(desc, 0, sizeof (*desc));
3433 	desc->val0 = LE_32(1 << 24);
3434 	desc->pa[0].tb1_addr =
3435 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3436 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3437 
3438 	/*
3439 	 * filling the byte count table may not be necessary for commands,
3440 	 * but we fill it here anyway.
3441 	 */
3442 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3443 	    tfd_offset[ring->cur].val = 8;
3444 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3445 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3446 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3447 	}
3448 
3449 	/* kick cmd ring */
3450 	ring->cur = (ring->cur + 1) % ring->count;
3451 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3452 
3453 	return (IWK_SUCCESS);
3454 }
3455 
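/*
 * Initial device configuration after the runtime firmware is loaded:
 * set the power table and bluetooth coexistence parameters, program
 * the RXON configuration for the current operating mode, run tx power
 * calibration, add the broadcast station, and set up its link quality
 * (rate scaling) table.
 */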
3456 static int
3457 iwk_config(iwk_sc_t *sc)
3458 {
3459 	ieee80211com_t *ic = &sc->sc_ic;
3460 	iwk_powertable_cmd_t powertable;
3461 	iwk_bt_cmd_t bt;
3462 	iwk_add_sta_t node;
3463 	iwk_link_quality_cmd_t link_quality;
3464 	int i, err;
3465 	uint16_t masks = 0;
3466 
3467 	/*
3468 	 * set power mode; power management is disabled for now, enable it later
3469 	 */
3470 	(void) memset(&powertable, 0, sizeof (powertable));
3471 	powertable.flags = LE_16(0x8);
3472 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3473 	    sizeof (powertable), 0);
3474 	if (err != IWK_SUCCESS) {
3475 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3476 		return (err);
3477 	}
3478 
3479 	/* configure bt coexistence */
3480 	(void) memset(&bt, 0, sizeof (bt));
3481 	bt.flags = 3;
3482 	bt.lead_time = 0xaa;
3483 	bt.max_kill = 1;
3484 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3485 	    sizeof (bt), 0);
3486 	if (err != IWK_SUCCESS) {
3487 		cmn_err(CE_WARN,
3488 		    "iwk_config(): "
3489 		    "failed to configure bt coexistence\n");
3490 		return (err);
3491 	}
3492 
3493 	/* configure rxon */
3494 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3495 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3496 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3497 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3498 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3499 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3500 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3501 	switch (ic->ic_opmode) {
3502 	case IEEE80211_M_STA:
3503 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3504 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3505 		    RXON_FILTER_DIS_DECRYPT_MSK |
3506 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3507 		break;
3508 	case IEEE80211_M_AHDEMO:
3509 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3510 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3511 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3512 		    RXON_FILTER_DIS_DECRYPT_MSK |
3513 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3514 		break;
3515 	case IEEE80211_M_HOSTAP:
3516 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3517 		break;
3518 	case IEEE80211_M_MONITOR:
3519 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3520 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3521 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3522 		break;
3523 	}
3524 	sc->sc_config.cck_basic_rates  = 0x0f;
3525 	sc->sc_config.ofdm_basic_rates = 0xff;
3526 
3527 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3528 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3529 
3530 	/* set antenna */
3531 
3532 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3533 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3534 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3535 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3536 
3537 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3538 	    sizeof (iwk_rxon_cmd_t), 0);
3539 	if (err != IWK_SUCCESS) {
3540 		cmn_err(CE_WARN, "iwk_config(): "
3541 		    "failed to send configure command\n");
3542 		return (err);
3543 	}
3544 	/* obtain current temperature of chipset */
3545 	sc->sc_tempera = iwk_curr_tempera(sc);
3546 
3547 	/* run Tx power calibration to determine the DSP and radio gains */
3548 	err = iwk_tx_power_calibration(sc);
3549 	if (err) {
3550 		cmn_err(CE_WARN, "iwk_config(): "
3551 		    "failed to set tx power table\n");
3552 		return (err);
3553 	}
3554 
3555 	/* add broadcast node so that we can send broadcast frames */
3556 	(void) memset(&node, 0, sizeof (node));
3557 	(void) memset(node.bssid, 0xff, 6);
3558 	node.id = IWK_BROADCAST_ID;
3559 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3560 	if (err != IWK_SUCCESS) {
3561 		cmn_err(CE_WARN, "iwk_config(): "
3562 		    "failed to add broadcast node\n");
3563 		return (err);
3564 	}
3565 
3566 	/* configure the TX link quality (rate scaling) table */
3567 	(void) memset(&link_quality, 0, sizeof (link_quality));
3568 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3569 		masks |= RATE_MCS_CCK_MSK;
3570 		masks |= RATE_MCS_ANT_B_MSK;
3571 		masks &= ~RATE_MCS_ANT_A_MSK;
3572 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3573 	}
3574 
3575 	link_quality.general_params.single_stream_ant_msk = 2;
3576 	link_quality.general_params.dual_stream_ant_msk = 3;
3577 	link_quality.agg_params.agg_dis_start_th = 3;
3578 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3579 	link_quality.sta_id = IWK_BROADCAST_ID;
3580 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3581 	    sizeof (link_quality), 0);
3582 	if (err != IWK_SUCCESS) {
3583 		cmn_err(CE_WARN, "iwk_config(): "
3584 		    "failed to config link quality table\n");
3585 		return (err);
3586 	}
3587 
3588 	return (IWK_SUCCESS);
3589 }
3590 
3591 static void
3592 iwk_stop_master(iwk_sc_t *sc)
3593 {
3594 	uint32_t tmp;
3595 	int n;
3596 
3597 	tmp = IWK_READ(sc, CSR_RESET);
3598 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3599 
3600 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3601 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3602 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3603 		return;
3604 
3605 	for (n = 0; n < 2000; n++) {
3606 		if (IWK_READ(sc, CSR_RESET) &
3607 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3608 			break;
3609 		DELAY(1000);
3610 	}
3611 	if (n == 2000)
3612 		IWK_DBG((IWK_DEBUG_HW,
3613 		    "timeout waiting for master stop\n"));
3614 }
3615 
3616 static int
3617 iwk_power_up(iwk_sc_t *sc)
3618 {
3619 	uint32_t tmp;
3620 
3621 	iwk_mac_access_enter(sc);
3622 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3623 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3624 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3625 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3626 	iwk_mac_access_exit(sc);
3627 
3628 	DELAY(5000);
3629 	return (IWK_SUCCESS);
3630 }
3631 
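/*
 * Low-level hardware bring-up before firmware load: clear pending
 * interrupts, set INIT_DONE and wait for the MAC clock to become ready,
 * request the DMA and BSM clocks, disable L1-Active, switch the power
 * supply to VMAIN, and apply a few PCI configuration-space and
 * CSR_SW_VER tweaks that appear to be revision-specific workarounds.
 */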
3632 static int
3633 iwk_preinit(iwk_sc_t *sc)
3634 {
3635 	uint32_t tmp;
3636 	int n;
3637 	uint8_t vlink;
3638 
3639 	/* clear any pending interrupts */
3640 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3641 
3642 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3643 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3644 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3645 
3646 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3647 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3648 
3649 	/* wait for clock ready */
3650 	for (n = 0; n < 1000; n++) {
3651 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3652 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3653 			break;
3654 		DELAY(10);
3655 	}
3656 	if (n == 1000) {
3657 		cmn_err(CE_WARN,
3658 		    "iwk_preinit(): timeout waiting for clock ready\n");
3659 		return (ETIMEDOUT);
3660 	}
3661 	iwk_mac_access_enter(sc);
3662 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3663 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3664 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3665 
3666 	DELAY(20);
3667 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3668 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3669 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3670 	iwk_mac_access_exit(sc);
3671 
3672 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3673 
3674 	(void) iwk_power_up(sc);
3675 
3676 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3677 		tmp = ddi_get32(sc->sc_cfg_handle,
3678 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3679 		ddi_put32(sc->sc_cfg_handle,
3680 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3681 		    tmp & ~(1 << 11));
3682 	}
3683 
3684 
3685 	vlink = ddi_get8(sc->sc_cfg_handle,
3686 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3687 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3688 	    vlink & ~2);
3689 
3690 	tmp = IWK_READ(sc, CSR_SW_VER);
3691 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3692 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3693 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3694 	IWK_WRITE(sc, CSR_SW_VER, tmp);
3695 
3696 	/* make sure power is supplied to each part of the hardware */
3697 	iwk_mac_access_enter(sc);
3698 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3699 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3700 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3701 	DELAY(5);
3702 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3703 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3704 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3705 	iwk_mac_access_exit(sc);
3706 	return (IWK_SUCCESS);
3707 }
3708 
3709 /*
3710  * set up semaphore flag to own EEPROM
3711  */
3712 static int iwk_eep_sem_down(iwk_sc_t *sc)
3713 {
3714 	int count1, count2;
3715 	uint32_t tmp;
3716 
3717 	for (count1 = 0; count1 < 1000; count1++) {
3718 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3719 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3720 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3721 
3722 		for (count2 = 0; count2 < 2; count2++) {
3723 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3724 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
3725 				return (IWK_SUCCESS);
3726 			DELAY(10000);
3727 		}
3728 	}
3729 	return (IWK_FAIL);
3730 }
3731 
3732 /*
3733  * reset semaphore flag to release EEPROM
3734  */
3735 static void iwk_eep_sem_up(iwk_sc_t *sc)
3736 {
3737 	uint32_t tmp;
3738 
3739 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3740 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3741 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3742 }
3743 
3744 /*
3745  * This function loads all information in the EEPROM into the iwk_eep
3746  * structure embedded in the iwk_sc_t structure
3747  */
3748 static int iwk_eep_load(iwk_sc_t *sc)
3749 {
3750 	int i, rr;
3751 	uint32_t rv, tmp, eep_gp;
3752 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
3753 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
3754 
3755 	/* read eeprom gp register in CSR */
3756 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
3757 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
3758 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
3759 		cmn_err(CE_WARN, "EEPROM not found\n");
3760 		return (IWK_FAIL);
3761 	}
3762 
3763 	rr = iwk_eep_sem_down(sc);
3764 	if (rr != 0) {
3765 		cmn_err(CE_WARN, "failed to own EEPROM\n");
3766 		return (IWK_FAIL);
3767 	}
3768 
3769 	for (addr = 0; addr < eep_sz; addr += 2) {
3770 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
3771 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
3772 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
3773 
3774 		for (i = 0; i < 10; i++) {
3775 			rv = IWK_READ(sc, CSR_EEPROM_REG);
3776 			if (rv & 1)
3777 				break;
3778 			DELAY(10);
3779 		}
3780 
3781 		if (!(rv & 1)) {
3782 			cmn_err(CE_WARN, "timeout when reading EEPROM\n");
3783 			iwk_eep_sem_up(sc);
3784 			return (IWK_FAIL);
3785 		}
3786 
3787 		eep_p[addr/2] = rv >> 16;
3788 	}
3789 
3790 	iwk_eep_sem_up(sc);
3791 	return (IWK_SUCCESS);
3792 }
3793 
3794 /*
3795  * init mac address in ieee80211com_t struct
3796  */
3797 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3798 {
3799 	ieee80211com_t *ic = &sc->sc_ic;
3800 	struct iwk_eep *ep = &sc->sc_eep_map;
3801 
3802 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3803 
3804 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
3805 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
3806 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
3807 }
3808 
3809 static int
3810 iwk_init(iwk_sc_t *sc)
3811 {
3812 	int qid, n, err;
3813 	clock_t clk;
3814 	uint32_t tmp;
3815 
3816 	mutex_enter(&sc->sc_glock);
3817 	sc->sc_flags &= ~IWK_F_FW_INIT;
3818 
3819 	(void) iwk_preinit(sc);
3820 
3821 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3822 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
3823 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
3824 		goto fail1;
3825 	}
3826 
3827 	/* init Rx ring */
3828 	iwk_mac_access_enter(sc);
3829 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
3830 
3831 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
3832 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
3833 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
3834 
3835 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
3836 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
3837 	    offsetof(struct iwk_shared, val0)) >> 4));
3838 
3839 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
3840 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
3841 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
3842 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
3843 	    (RX_QUEUE_SIZE_LOG <<
3844 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
3845 	iwk_mac_access_exit(sc);
3846 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
3847 	    (RX_QUEUE_SIZE - 1) & ~0x7);
3848 
3849 	/* init Tx rings */
3850 	iwk_mac_access_enter(sc);
3851 	iwk_reg_write(sc, SCD_TXFACT, 0);
3852 
3853 	/* keep warm page */
3854 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
3855 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
3856 
3857 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
3858 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
3859 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
3860 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
3861 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3862 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3863 	}
3864 	iwk_mac_access_exit(sc);
3865 
3866 	/* clear "radio off" and "disable command" bits */
3867 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3868 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
3869 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3870 
3871 	/* clear any pending interrupts */
3872 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3873 
3874 	/* enable interrupts */
3875 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
3876 
3877 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3878 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3879 
3880 	/*
3881 	 * backup ucode data part for future use.
3882 	 */
3883 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
3884 	    sc->sc_dma_fw_data.mem_va,
3885 	    sc->sc_dma_fw_data.alength);
3886 
3887 	for (n = 0; n < 2; n++) {
3888 		/* load firmware init segment into NIC */
3889 		err = iwk_load_firmware(sc);
3890 		if (err != IWK_SUCCESS) {
3891 			cmn_err(CE_WARN, "iwk_init(): "
3892 			    "failed to setup boot firmware\n");
3893 			continue;
3894 		}
3895 
3896 		/* now press "execute" start running */
3897 		IWK_WRITE(sc, CSR_RESET, 0);
3898 		break;
3899 	}
3900 	if (n == 2) {
3901 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
3902 		goto fail1;
3903 	}
3904 	/* ..and wait at most two seconds for the adapter to initialize */
3905 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
3906 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
3907 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
3908 			break;
3909 	}
3910 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
3911 		cmn_err(CE_WARN,
3912 		    "iwk_init(): timeout waiting for firmware init\n");
3913 		goto fail1;
3914 	}
3915 
3916 	/*
3917 	 * at this point, the firmware is loaded OK, then config the hardware
3918 	 * with the ucode API, including rxon, txpower, etc.
3919 	 */
3920 	err = iwk_config(sc);
3921 	if (err) {
3922 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
3923 		goto fail1;
3924 	}
3925 
3926 	/* at this point, hardware may receive beacons :) */
3927 	mutex_exit(&sc->sc_glock);
3928 	return (IWK_SUCCESS);
3929 
3930 fail1:
3931 	err = IWK_FAIL;
3932 	mutex_exit(&sc->sc_glock);
3933 	return (err);
3934 }
3935 
3936 static void
3937 iwk_stop(iwk_sc_t *sc)
3938 {
3939 	uint32_t tmp;
3940 	int i;
3941 
3942 	if (!(sc->sc_flags & IWK_F_QUIESCED))
3943 		mutex_enter(&sc->sc_glock);
3944 
3945 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3946 	/* disable interrupts */
3947 	IWK_WRITE(sc, CSR_INT_MASK, 0);
3948 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
3949 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
3950 
3951 	/* reset all Tx rings */
3952 	for (i = 0; i < IWK_NUM_QUEUES; i++)
3953 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
3954 
3955 	/* reset Rx ring */
3956 	iwk_reset_rx_ring(sc);
3957 
3958 	iwk_mac_access_enter(sc);
3959 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
3960 	iwk_mac_access_exit(sc);
3961 
3962 	DELAY(5);
3963 
3964 	iwk_stop_master(sc);
3965 
3966 	sc->sc_tx_timer = 0;
3967 	tmp = IWK_READ(sc, CSR_RESET);
3968 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
3969 
3970 	if (!(sc->sc_flags & IWK_F_QUIESCED))
3971 		mutex_exit(&sc->sc_glock);
3972 }
3973 
3974 /*
3975  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
3976  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
3977  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
3978  * INRIA Sophia - Projet Planete
3979  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
3980  */
3981 #define	is_success(amrr)	\
3982 	((amrr)->retrycnt < (amrr)->txcnt / 10)
3983 #define	is_failure(amrr)	\
3984 	((amrr)->retrycnt > (amrr)->txcnt / 3)
3985 #define	is_enough(amrr)		\
3986 	((amrr)->txcnt > 100)
3987 #define	is_min_rate(in)		\
3988 	((in)->in_txrate == 0)
3989 #define	is_max_rate(in)		\
3990 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
3991 #define	increase_rate(in)	\
3992 	((in)->in_txrate++)
3993 #define	decrease_rate(in)	\
3994 	((in)->in_txrate--)
3995 #define	reset_cnt(amrr)		\
3996 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
3997 
3998 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
3999 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
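/*
 * Worked example of the thresholds above: with txcnt = 120 and
 * retrycnt = 8, is_enough() and is_success() both hold (8 < 120/10), so
 * the success counter is bumped; once it reaches success_threshold and
 * the node is not already at its top rate, the rate index is increased
 * and "recovery" mode is entered.  With retrycnt = 50 instead,
 * is_failure() holds (50 > 120/3) and the rate index is decreased.
 */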
4000 
4001 static void
4002 iwk_amrr_init(iwk_amrr_t *amrr)
4003 {
4004 	amrr->success = 0;
4005 	amrr->recovery = 0;
4006 	amrr->txcnt = amrr->retrycnt = 0;
4007 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4008 }
4009 
4010 static void
4011 iwk_amrr_timeout(iwk_sc_t *sc)
4012 {
4013 	ieee80211com_t *ic = &sc->sc_ic;
4014 
4015 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
4016 	if (ic->ic_opmode == IEEE80211_M_STA)
4017 		iwk_amrr_ratectl(NULL, ic->ic_bss);
4018 	else
4019 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
4020 	sc->sc_clk = ddi_get_lbolt();
4021 }
4022 
4023 /* ARGSUSED */
4024 static void
4025 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
4026 {
4027 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
4028 	int need_change = 0;
4029 
4030 	if (is_success(amrr) && is_enough(amrr)) {
4031 		amrr->success++;
4032 		if (amrr->success >= amrr->success_threshold &&
4033 		    !is_max_rate(in)) {
4034 			amrr->recovery = 1;
4035 			amrr->success = 0;
4036 			increase_rate(in);
4037 			IWK_DBG((IWK_DEBUG_RATECTL,
4038 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4039 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4040 			need_change = 1;
4041 		} else {
4042 			amrr->recovery = 0;
4043 		}
4044 	} else if (is_failure(amrr)) {
4045 		amrr->success = 0;
4046 		if (!is_min_rate(in)) {
4047 			if (amrr->recovery) {
4048 				amrr->success_threshold++;
4049 				if (amrr->success_threshold >
4050 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4051 					amrr->success_threshold =
4052 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4053 			} else {
4054 				amrr->success_threshold =
4055 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4056 			}
4057 			decrease_rate(in);
4058 			IWK_DBG((IWK_DEBUG_RATECTL,
4059 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4060 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4061 			need_change = 1;
4062 		}
4063 		amrr->recovery = 0;	/* paper is incorrect */
4064 	}
4065 
4066 	if (is_enough(amrr) || need_change)
4067 		reset_cnt(amrr);
4068 }
4069 
4070 /*
4071  * calculate the 4965 chipset's Kelvin temperature according to the data
4072  * in the init alive and statistics notifications.
4073  * The details are described in the iwk_calibration.h file
4074  */
4075 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4076 {
4077 	int32_t  tempera;
4078 	int32_t  r1, r2, r3;
4079 	uint32_t  r4_u;
4080 	int32_t   r4_s;
4081 
4082 	if (iwk_is_fat_channel(sc)) {
4083 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4084 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4085 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4086 		r4_u = sc->sc_card_alive_init.therm_r4[1];
4087 	} else {
4088 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4089 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4090 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4091 		r4_u = sc->sc_card_alive_init.therm_r4[0];
4092 	}
4093 
4094 	if (sc->sc_flags & IWK_F_STATISTICS) {
4095 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4096 		    (31-23)) >> (31-23);
4097 	} else {
4098 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4099 	}
4100 
4101 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4102 	    r1, r2, r3, r4_s));
4103 
4104 	if (r3 == r1) {
4105 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
4106 		    "failed to calculate temperature"
4107 		    " because r3 = r1\n");
4108 		return (DDI_FAILURE);
4109 	}
4110 
4111 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4112 	tempera /= (r3 - r1);
4113 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
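	/*
	 * In short, the Kelvin temperature computed above is
	 *   T = 0.97 * TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1)
	 *       + TEMPERATURE_CALIB_KELVIN_OFFSET
	 * where R1-R3 come from the init alive notification and R4 is taken
	 * from the latest statistics notification when one is available.
	 */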
4114 
4115 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4116 	    tempera, KELVIN_TO_CELSIUS(tempera)));
4117 
4118 	return (tempera);
4119 }
4120 
4121 /* Determine whether 4965 is using 2.4 GHz band */
4122 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4123 {
4124 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4125 }
4126 
4127 /* Determine whether 4965 is using fat channel */
4128 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4129 {
4130 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4131 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4132 }
4133 
4134 /*
4135  * In MIMO mode, determine which group 4965's current channel belongs to.
4136  * For more information about "channel group",
4137  * please refer to the iwk_calibration.h file
4138  */
4139 static int iwk_txpower_grp(uint16_t channel)
4140 {
4141 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4142 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4143 		return (CALIB_CH_GROUP_5);
4144 	}
4145 
4146 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4147 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4148 		return (CALIB_CH_GROUP_1);
4149 	}
4150 
4151 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4152 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4153 		return (CALIB_CH_GROUP_2);
4154 	}
4155 
4156 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4157 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4158 		return (CALIB_CH_GROUP_3);
4159 	}
4160 
4161 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4162 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4163 		return (CALIB_CH_GROUP_4);
4164 	}
4165 
4166 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
4167 	    "can't find txpower group for channel %d.\n", channel);
4168 
4169 	return (DDI_FAILURE);
4170 }
4171 
4172 /* 2.4 GHz */
4173 static uint16_t iwk_eep_band_1[14] = {
4174 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4175 };
4176 
4177 /* 5.2 GHz bands */
4178 static uint16_t iwk_eep_band_2[13] = {
4179 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4180 };
4181 
4182 static uint16_t iwk_eep_band_3[12] = {
4183 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4184 };
4185 
4186 static uint16_t iwk_eep_band_4[11] = {
4187 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4188 };
4189 
4190 static uint16_t iwk_eep_band_5[6] = {
4191 	145, 149, 153, 157, 161, 165
4192 };
4193 
4194 static uint16_t iwk_eep_band_6[7] = {
4195 	1, 2, 3, 4, 5, 6, 7
4196 };
4197 
4198 static uint16_t iwk_eep_band_7[11] = {
4199 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4200 };
4201 
4202 /* Get regulatory data from eeprom for a given channel */
4203 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4204     uint16_t channel,
4205     int is_24G, int is_fat, int is_hi_chan)
4206 {
4207 	int32_t i;
4208 	uint16_t chan;
4209 
4210 	if (is_fat) {  /* 11n mode */
4211 
4212 		if (is_hi_chan) {
4213 			chan = channel - 4;
4214 		} else {
4215 			chan = channel;
4216 		}
4217 
4218 		for (i = 0; i < 7; i++) {
4219 			if (iwk_eep_band_6[i] == chan) {
4220 				return (&sc->sc_eep_map.band_24_channels[i]);
4221 			}
4222 		}
4223 		for (i = 0; i < 11; i++) {
4224 			if (iwk_eep_band_7[i] == chan) {
4225 				return (&sc->sc_eep_map.band_52_channels[i]);
4226 			}
4227 		}
4228 	} else if (is_24G) {  /* 2.4 GHz band */
4229 		for (i = 0; i < 14; i++) {
4230 			if (iwk_eep_band_1[i] == channel) {
4231 				return (&sc->sc_eep_map.band_1_channels[i]);
4232 			}
4233 		}
4234 	} else {  /* 5 GHz band */
4235 		for (i = 0; i < 13; i++) {
4236 			if (iwk_eep_band_2[i] == channel) {
4237 				return (&sc->sc_eep_map.band_2_channels[i]);
4238 			}
4239 		}
4240 		for (i = 0; i < 12; i++) {
4241 			if (iwk_eep_band_3[i] == channel) {
4242 				return (&sc->sc_eep_map.band_3_channels[i]);
4243 			}
4244 		}
4245 		for (i = 0; i < 11; i++) {
4246 			if (iwk_eep_band_4[i] == channel) {
4247 				return (&sc->sc_eep_map.band_4_channels[i]);
4248 			}
4249 		}
4250 		for (i = 0; i < 6; i++) {
4251 			if (iwk_eep_band_5[i] == channel) {
4252 				return (&sc->sc_eep_map.band_5_channels[i]);
4253 			}
4254 		}
4255 	}
4256 
4257 	return (NULL);
4258 }
4259 
4260 /*
4261  * Determine which subband a given channel belongs
4262  * to in 2.4 GHz or 5 GHz band
4263  */
4264 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4265 {
4266 	int32_t b_n = -1;
4267 
4268 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4269 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4270 			continue;
4271 		}
4272 
4273 		if ((channel >=
4274 		    (uint16_t)sc->sc_eep_map.calib_info.
4275 		    band_info_tbl[b_n].ch_from) &&
4276 		    (channel <=
4277 		    (uint16_t)sc->sc_eep_map.calib_info.
4278 		    band_info_tbl[b_n].ch_to)) {
4279 			break;
4280 		}
4281 	}
4282 
4283 	return (b_n);
4284 }
4285 
4286 /* Rounded integer division used by the interpolation operation */
4287 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4288 {
4289 	int32_t sign = 1;
4290 
4291 	if (num < 0) {
4292 		sign = -sign;
4293 		num = -num;
4294 	}
4295 
4296 	if (denom < 0) {
4297 		sign = -sign;
4298 		denom = -denom;
4299 	}
4300 
4301 	*res = ((num*2 + denom) / (denom*2)) * sign;
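	/*
	 * This is integer division rounded to the nearest value (halves
	 * round away from zero): for example 7 / 2 yields (7*2 + 2) / 4 = 4
	 * here, where plain integer division would give 3.
	 */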
4302 
4303 	return (IWK_SUCCESS);
4304 }
4305 
4306 /* Linear interpolation between calibration points (x1, y1) and (x2, y2) */
4307 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4308     int32_t x2, int32_t y2)
4309 {
4310 	int32_t val;
4311 
4312 	if (x2 == x1) {
4313 		return (y1);
4314 	} else {
4315 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4316 		return (val + y2);
4317 	}
4318 }
4319 
4320 /* Get interpolation measurement data of a given channel for all chains. */
4321 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4322     struct iwk_eep_calib_channel_info *chan_info)
4323 {
4324 	int32_t ban_n;
4325 	uint32_t ch1_n, ch2_n;
4326 	int32_t c, m;
4327 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4328 
4329 	/* determine subband number */
4330 	ban_n = iwk_band_number(sc, channel);
4331 	if (ban_n >= EEP_TX_POWER_BANDS) {
4332 		return (DDI_FAILURE);
4333 	}
4334 
4335 	ch1_n =
4336 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4337 	ch2_n =
4338 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4339 
4340 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4341 
4342 	/*
4343 	 * go through all chains on chipset
4344 	 */
4345 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4346 		/*
4347 		 * go through all factory measurements
4348 		 */
4349 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4350 			m1_p =
4351 			    &(sc->sc_eep_map.calib_info.
4352 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4353 			m2_p =
4354 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4355 			    ch2.measure[c][m]);
4356 			m_p = &(chan_info->measure[c][m]);
4357 
4358 			/*
4359 			 * make interpolation to get actual
4360 			 * Tx power for given channel
4361 			 */
4362 			m_p->actual_pow = iwk_interpolate_value(channel,
4363 			    ch1_n, m1_p->actual_pow,
4364 			    ch2_n, m2_p->actual_pow);
4365 
4366 			/* make interpolation to get index into gain table */
4367 			m_p->gain_idx = iwk_interpolate_value(channel,
4368 			    ch1_n, m1_p->gain_idx,
4369 			    ch2_n, m2_p->gain_idx);
4370 
4371 			/* make interpolation to get chipset temperature */
4372 			m_p->temperature = iwk_interpolate_value(channel,
4373 			    ch1_n, m1_p->temperature,
4374 			    ch2_n, m2_p->temperature);
4375 
4376 			/*
4377 			 * make interpolation to get power
4378 			 * amp detector level
4379 			 */
4380 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4381 			    m1_p->pa_det,
4382 			    ch2_n, m2_p->pa_det);
4383 		}
4384 	}
4385 
4386 	return (IWK_SUCCESS);
4387 }
4388 
4389 /*
4390  * Calculate voltage compensation for Tx power. For more information,
4391  * please refer to the iwk_calibration.h file
4392  */
4393 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4394     int32_t curr_voltage)
4395 {
4396 	int32_t vol_comp = 0;
4397 
4398 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4399 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4400 		return (vol_comp);
4401 	}
4402 
4403 	(void) iwk_division(curr_voltage-eep_voltage,
4404 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4405 
4406 	if (curr_voltage > eep_voltage) {
4407 		vol_comp *= 2;
4408 	}
4409 	if ((vol_comp < -2) || (vol_comp > 2)) {
4410 		vol_comp = 0;
4411 	}
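	/*
	 * Worked example (illustrative numbers): a reading 1.5 code steps
	 * above the EEPROM value gives a rounded quotient of 2, which the
	 * doubling above turns into 4; that falls outside [-2, 2], so the
	 * check above discards it and no compensation is applied.  Only
	 * small differences, at most two steps either way, ever reach the
	 * gain index calculation.
	 */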
4412 
4413 	return (vol_comp);
4414 }
4415 
4416 /*
4417  * Thermal compensation values for txpower for various frequency ranges ...
4418  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4419  */
4420 static struct iwk_txpower_tempera_comp {
4421 	int32_t degrees_per_05db_a;
4422 	int32_t degrees_per_05db_a_denom;
4423 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4424 	{9, 2},			/* group 0 5.2, ch  34-43 */
4425 	{4, 1},			/* group 1 5.2, ch  44-70 */
4426 	{4, 1},			/* group 2 5.2, ch  71-124 */
4427 	{4, 1},			/* group 3 5.2, ch 125-200 */
4428 	{3, 1}			/* group 4 2.4, ch   all */
4429 };
4430 
4431 /*
4432  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4433  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4434  */
4435 static int32_t back_off_table[] = {
4436 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4437 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4438 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4439 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4440 	10			/* CCK */
4441 };
4442 
4443 /* determine minimum Tx power index in gain table */
4444 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4445 {
4446 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4447 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4448 	}
4449 
4450 	return (MIN_TX_GAIN_INDEX);
4451 }
4452 
4453 /*
4454  * Determine DSP and radio gain according to temperature and other factors.
4455  * This function does the majority of the Tx power calibration work
4456  */
4457 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4458     struct iwk_tx_power_db *tp_db)
4459 {
4460 	int is_24G, is_fat, is_high_chan = 0, is_mimo;
4461 	int c, r;
4462 	int32_t target_power;
4463 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4464 	uint16_t channel;
4465 	uint8_t saturation_power;
4466 	int32_t regu_power;
4467 	int32_t curr_regu_power;
4468 	struct iwk_eep_channel *eep_chan_p;
4469 	struct iwk_eep_calib_channel_info eep_chan_calib;
4470 	int32_t eep_voltage, init_voltage;
4471 	int32_t voltage_compensation;
4472 	int32_t temperature;
4473 	int32_t degrees_per_05db_num;
4474 	int32_t degrees_per_05db_denom;
4475 	struct iwk_eep_calib_measure *measure_p;
4476 	int32_t interpo_temp;
4477 	int32_t power_limit;
4478 	int32_t atten_value;
4479 	int32_t tempera_comp[2];
4480 	int32_t interpo_gain_idx[2];
4481 	int32_t interpo_actual_pow[2];
4482 	union iwk_tx_power_dual_stream txpower_gains;
4483 	int32_t txpower_gains_idx;
4484 
4485 	channel = sc->sc_config.chan;
4486 
4487 	/* 2.4 GHz or 5 GHz band */
4488 	is_24G = iwk_is_24G_band(sc);
4489 
4490 	/* fat channel or not */
4491 	is_fat = iwk_is_fat_channel(sc);
4492 
4493 	/*
4494 	 * use the low or high half of the channel pair to identify the
4495 	 * control channel location of a fat (40 MHz) channel
4496 	 */
4497 	if (is_fat && (sc->sc_config.flags &
4498 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4499 		is_high_chan = 1;
4500 	}
4501 
4502 	if ((channel > 0) && (channel < 200)) {
4503 		/* get regulatory channel data from eeprom */
4504 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4505 		    is_fat, is_high_chan);
4506 		if (NULL == eep_chan_p) {
4507 			cmn_err(CE_WARN,
4508 			    "iwk_txpower_table_cmd_init(): "
4509 			    "can't get channel information\n");
4510 			return (DDI_FAILURE);
4511 		}
4512 	} else {
4513 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4514 		    "channel(%d) isn't in proper range\n",
4515 		    channel);
4516 		return (DDI_FAILURE);
4517 	}
4518 
4519 	/* initial value of Tx power */
4520 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4521 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4522 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4523 		    "user TX power is too weak\n");
4524 		return (DDI_FAILURE);
4525 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4526 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4527 		    "user TX power is too strong\n");
4528 		return (DDI_FAILURE);
4529 	}
4530 
4531 	target_power = 2 * sc->sc_user_txpower;
4532 
4533 	/* determine which group current channel belongs to */
4534 	tx_grp = iwk_txpower_grp(channel);
4535 	if (tx_grp < 0) {
4536 		return (tx_grp);
4537 	}
4538 
4539 
4540 	if (is_fat) {
4541 		if (is_high_chan) {
4542 			channel -= 2;
4543 		} else {
4544 			channel += 2;
4545 		}
4546 	}
4547 
4548 	/* determine saturation power */
4549 	if (is_24G) {
4550 		saturation_power =
4551 		    sc->sc_eep_map.calib_info.saturation_power24;
4552 	} else {
4553 		saturation_power =
4554 		    sc->sc_eep_map.calib_info.saturation_power52;
4555 	}
4556 
4557 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4558 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4559 		if (is_24G) {
4560 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4561 		} else {
4562 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4563 		}
4564 	}
4565 
4566 	/* determine regulatory power */
4567 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4568 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4569 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4570 		if (is_24G) {
4571 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4572 		} else {
4573 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4574 		}
4575 	}
4576 
4577 	/*
4578 	 * get measurement data for current channel
4579 	 * such as temperature, index into gain table, and actual Tx power
4580 	 */
4581 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4582 
4583 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4584 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4585 
4586 	/* calculate voltage compensation to Tx power */
4587 	voltage_compensation =
4588 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4589 
4590 	if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4591 		temperature = sc->sc_tempera;
4592 	} else {
4593 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4594 	}
4595 	if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4596 		temperature = sc->sc_tempera;
4597 	} else {
4598 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4599 	}
4600 	temperature = KELVIN_TO_CELSIUS(temperature);
4601 
4602 	degrees_per_05db_num =
4603 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4604 	degrees_per_05db_denom =
4605 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4606 
4607 	for (c = 0; c < 2; c++) {  /* go through all chains */
4608 		measure_p = &eep_chan_calib.measure[c][1];
4609 		interpo_temp = measure_p->temperature;
4610 
4611 		/* determine temperature compensation to Tx power */
4612 		(void) iwk_division(
4613 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4614 		    degrees_per_05db_num, &tempera_comp[c]);
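		/*
		 * e.g. for the 2.4 GHz group ({3, 1} in the table above),
		 * a chip running 9 degrees Celsius hotter than the factory
		 * measurement gets tempera_comp = 3, i.e. one half-dB gain
		 * step per 3 degrees, as the table comment indicates.
		 */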
4615 
4616 		interpo_gain_idx[c] = measure_p->gain_idx;
4617 		interpo_actual_pow[c] = measure_p->actual_pow;
4618 	}
4619 
4620 	/*
4621 	 * go through all rate entries in Tx power table
4622 	 */
4623 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4624 		if (r & 0x8) {
4625 			/* need to lower regulatory power for MIMO mode */
4626 			curr_regu_power = regu_power -
4627 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4628 			is_mimo = 1;
4629 		} else {
4630 			curr_regu_power = regu_power;
4631 			is_mimo = 0;
4632 		}
4633 
4634 		power_limit = saturation_power - back_off_table[r];
4635 		if (power_limit > curr_regu_power) {
4636 			/* final Tx power limit */
4637 			power_limit = curr_regu_power;
4638 		}
4639 
4640 		if (target_power > power_limit) {
4641 			target_power = power_limit; /* final target Tx power */
4642 		}
4643 
4644 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4645 			if (is_mimo) {
4646 				atten_value =
4647 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4648 			} else {
4649 				atten_value = 0;
4650 			}
4651 
4652 			/*
4653 			 * calculate the index into the gain table
4654 			 * this step is very important
4655 			 */
4656 			txpower_gains_idx = interpo_gain_idx[c] -
4657 			    (target_power - interpo_actual_pow[c]) -
4658 			    tempera_comp[c] - voltage_compensation +
4659 			    atten_value;
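			/*
			 * In other words:
			 *   index = interpolated_gain_index
			 *       - (target_power - interpolated_power)
			 *       - temperature_comp - voltage_comp
			 *       + mimo_attenuation
			 * The 2x scaling of sc_user_txpower above
			 * suggests these values are in half-dB steps;
			 * asking for more power than the factory
			 * measurement lowers the index, and the
			 * iwk_min_power_index() check below keeps it
			 * within range.
			 */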
4660 
4661 			if (txpower_gains_idx <
4662 			    iwk_min_power_index(r, is_24G)) {
4663 				txpower_gains_idx =
4664 				    iwk_min_power_index(r, is_24G);
4665 			}
4666 
4667 			if (!is_24G) {
4668 				/*
4669 				 * support negative index for 5 GHz
4670 				 * band
4671 				 */
4672 				txpower_gains_idx += 9;
4673 			}
4674 
4675 			if (POWER_TABLE_CCK_ENTRY == r) {
4676 				/* for CCK mode, make necessary attenuation */
4677 				txpower_gains_idx +=
4678 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4679 			}
4680 
4681 			if (txpower_gains_idx > 107) {
4682 				txpower_gains_idx = 107;
4683 			} else if (txpower_gains_idx < 0) {
4684 				txpower_gains_idx = 0;
4685 			}
4686 
4687 			/* search DSP and radio gains in gain table */
4688 			txpower_gains.s.radio_tx_gain[c] =
4689 			    gains_table[is_24G][txpower_gains_idx].radio;
4690 			txpower_gains.s.dsp_predis_atten[c] =
4691 			    gains_table[is_24G][txpower_gains_idx].dsp;
4692 
4693 			IWK_DBG((IWK_DEBUG_CALIBRATION,
4694 			    "rate_index: %d, "
4695 			    "gain_index %d, c: %d,is_mimo: %d\n",
4696 			    r, txpower_gains_idx, c, is_mimo));
4697 		}
4698 
4699 		/* initialize Tx power table */
4700 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4701 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4702 		} else {
4703 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
4704 		}
4705 	}
4706 
4707 	return (IWK_SUCCESS);
4708 }
4709 
4710 /*
4711  * perform Tx power calibration to adjust Tx power.
4712  * This is done by sending out the Tx power table command.
4713  */
4714 static int iwk_tx_power_calibration(iwk_sc_t *sc)
4715 {
4716 	iwk_tx_power_table_cmd_t cmd;
4717 	int rv;
4718 
4719 	if (sc->sc_flags & IWK_F_SCANNING) {
4720 		return (IWK_SUCCESS);
4721 	}
4722 
4723 	/* necessary initialization to Tx power table command */
4724 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
4725 	cmd.channel = sc->sc_config.chan;
4726 	cmd.channel_normal_width = 0;
4727 
4728 	/* initialize Tx power table */
4729 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
4730 	if (rv) {
4731 		cmn_err(CE_NOTE, "failed to init Tx power table: %d\n", rv);
4732 		return (rv);
4733 	}
4734 
4735 	/* send out Tx power table command */
4736 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
4737 	if (rv) {
4738 		return (rv);
4739 	}
4740 
4741 	/* record current temperature */
4742 	sc->sc_last_tempera = sc->sc_tempera;
4743 
4744 	return (IWK_SUCCESS);
4745 }
4746 
4747 /* This function handles the statistics notification from the uCode */
4748 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
4749 {
4750 	int is_diff;
4751 	struct iwk_notif_statistics *statistics_p =
4752 	    (struct iwk_notif_statistics *)(desc + 1);
4753 
4754 	mutex_enter(&sc->sc_glock);
4755 
4756 	is_diff = (sc->sc_statistics.general.temperature !=
4757 	    statistics_p->general.temperature) ||
4758 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
4759 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
4760 
4761 	/* update statistics data */
4762 	(void) memcpy(&sc->sc_statistics, statistics_p,
4763 	    sizeof (struct iwk_notif_statistics));
4764 
4765 	sc->sc_flags |= IWK_F_STATISTICS;
4766 
4767 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
4768 		/* make Receiver gain balance calibration */
4769 		(void) iwk_rxgain_diff(sc);
4770 
4771 		/* make Receiver sensitivity calibration */
4772 		(void) iwk_rx_sens(sc);
4773 	}
4774 
4775 
4776 	if (!is_diff) {
4777 		mutex_exit(&sc->sc_glock);
4778 		return;
4779 	}
4780 
4781 	/* calibrate the current temperature of the 4965 chipset */
4782 	sc->sc_tempera = iwk_curr_tempera(sc);
4783 
4784 	/* a change of 3 degrees or more triggers Tx power calibration */
4785 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
4786 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
4787 		/* make Tx power calibration */
4788 		(void) iwk_tx_power_calibration(sc);
4789 	}
4790 
4791 	mutex_exit(&sc->sc_glock);
4792 }
4793 
4794 /* Determine whether this station is in associated state or not */
4795 static int iwk_is_associated(iwk_sc_t *sc)
4796 {
4797 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
4798 }
4799 
4800 /* Make necessary preparation for Receiver gain balance calibration */
4801 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
4802 {
4803 	int i, rv;
4804 	struct iwk_calibration_cmd cmd;
4805 	struct iwk_rx_gain_diff *gain_diff_p;
4806 
4807 	gain_diff_p = &sc->sc_rxgain_diff;
4808 
4809 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
4810 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
4811 
4812 	for (i = 0; i < RX_CHAINS_NUM; i++) {
4813 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
4814 	}
4815 
4816 	if (iwk_is_associated(sc)) {
4817 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4818 		cmd.diff_gain_a = 0;
4819 		cmd.diff_gain_b = 0;
4820 		cmd.diff_gain_c = 0;
4821 
4822 		/* assume the gains of all Rx chains are balanced */
4823 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
4824 		    sizeof (cmd), 1);
4825 		if (rv) {
4826 			return (rv);
4827 		}
4828 
4829 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
4830 	}
4831 
4832 	return (IWK_SUCCESS);
4833 }
4834 
4835 /*
4836  * perform Receiver gain balance calibration to balance Rx gain between
4837  * Rx chains and determine which chain is disconnected
4838  */
4839 static int iwk_rxgain_diff(iwk_sc_t *sc)
4840 {
4841 	int i, is_24G, rv;
4842 	int max_beacon_chain_n;
4843 	int min_noise_chain_n;
4844 	uint16_t channel_n;
4845 	int32_t beacon_diff;
4846 	int32_t noise_diff;
4847 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
4848 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
4849 	struct iwk_calibration_cmd cmd;
4850 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4851 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4852 	struct statistics_rx_non_phy *rx_general_p =
4853 	    &sc->sc_statistics.rx.general;
4854 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
4855 
4856 	if (INTERFERENCE_DATA_AVAILABLE !=
4857 	    rx_general_p->interference_data_flag) {
4858 		return (IWK_SUCCESS);
4859 	}
4860 
4861 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
4862 		return (IWK_SUCCESS);
4863 	}
4864 
4865 	is_24G = iwk_is_24G_band(sc);
4866 	channel_n = sc->sc_config.chan;	 /* channel number */
4867 
4868 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
4869 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
4870 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
4871 	    !is_24G)) {
4872 		return (IWK_SUCCESS);
4873 	}
4874 
4875 	/* Rx chain's noise strength from statistics notification */
4876 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
4877 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
4878 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
4879 
4880 	/* Rx chain's beacon strength from statistics notification */
4881 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
4882 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
4883 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
4884 
4885 	gain_diff_p->beacon_count++;
4886 
4887 	/* accumulate chain's noise strength */
4888 	gain_diff_p->noise_stren_a += noise_chain_a;
4889 	gain_diff_p->noise_stren_b += noise_chain_b;
4890 	gain_diff_p->noise_stren_c += noise_chain_c;
4891 
4892 	/* accumulate chain's beacon strength */
4893 	gain_diff_p->beacon_stren_a += beacon_chain_a;
4894 	gain_diff_p->beacon_stren_b += beacon_chain_b;
4895 	gain_diff_p->beacon_stren_c += beacon_chain_c;
4896 
4897 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
4898 		/* calculate average beacon strength */
4899 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
4900 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
4901 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
4902 
4903 		/* calculate average noise strength */
4904 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
4905 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
4906 		noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
4907 
4908 		/* determine maximum beacon strength among 3 chains */
4909 		if ((beacon_aver[0] >= beacon_aver[1]) &&
4910 		    (beacon_aver[0] >= beacon_aver[2])) {
4911 			max_beacon_chain_n = 0;
4912 			gain_diff_p->connected_chains = 1 << 0;
4913 		} else if (beacon_aver[1] >= beacon_aver[2]) {
4914 			max_beacon_chain_n = 1;
4915 			gain_diff_p->connected_chains = 1 << 1;
4916 		} else {
4917 			max_beacon_chain_n = 2;
4918 			gain_diff_p->connected_chains = 1 << 2;
4919 		}
4920 
4921 		/* determine which chain is disconnected */
4922 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4923 			if (i != max_beacon_chain_n) {
4924 				beacon_diff = beacon_aver[max_beacon_chain_n] -
4925 				    beacon_aver[i];
4926 				if (beacon_diff > MAX_ALLOWED_DIFF) {
4927 					gain_diff_p->disconnect_chain[i] = 1;
4928 				} else {
4929 					gain_diff_p->connected_chains |=
4930 					    (1 << i);
4931 				}
4932 			}
4933 		}
4934 
4935 		/*
4936 		 * if chain A and B are both disconnected,
4937 		 * assume the stronger in beacon strength is connected
4938 		 */
4939 		if (gain_diff_p->disconnect_chain[0] &&
4940 		    gain_diff_p->disconnect_chain[1]) {
4941 			if (beacon_aver[0] >= beacon_aver[1]) {
4942 				gain_diff_p->disconnect_chain[0] = 0;
4943 				gain_diff_p->connected_chains |= (1 << 0);
4944 			} else {
4945 				gain_diff_p->disconnect_chain[1] = 0;
4946 				gain_diff_p->connected_chains |= (1 << 1);
4947 			}
4948 		}
4949 
4950 		/* determine minimum noise strength among 3 chains */
4951 		if (!gain_diff_p->disconnect_chain[0]) {
4952 			min_noise_chain_n = 0;
4953 
4954 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4955 				if (!gain_diff_p->disconnect_chain[i] &&
4956 				    (noise_aver[i] <=
4957 				    noise_aver[min_noise_chain_n])) {
4958 					min_noise_chain_n = i;
4959 				}
4960 
4961 			}
4962 		} else {
4963 			min_noise_chain_n = 1;
4964 
4965 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4966 				if (!gain_diff_p->disconnect_chain[i] &&
4967 				    (noise_aver[i] <=
4968 				    noise_aver[min_noise_chain_n])) {
4969 					min_noise_chain_n = i;
4970 				}
4971 			}
4972 		}
4973 
4974 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
4975 
4976 		/* determine gain difference between chains */
4977 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4978 			if (!gain_diff_p->disconnect_chain[i] &&
4979 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
4980 			    gain_diff_p->gain_diff_chain[i])) {
4981 
4982 				noise_diff = noise_aver[i] -
4983 				    noise_aver[min_noise_chain_n];
4984 				gain_diff_p->gain_diff_chain[i] =
4985 				    (uint8_t)((noise_diff * 10) / 15);
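				/*
				 * e.g. a chain whose average noise
				 * is 3 units above the quietest
				 * chain gets (3 * 10) / 15 = 2 here,
				 * capped at 3 below; bit 2 is then
				 * OR-ed in before the value goes out
				 * in the PHY calibration command.
				 */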
4986 
4987 				if (gain_diff_p->gain_diff_chain[i] > 3) {
4988 					gain_diff_p->gain_diff_chain[i] = 3;
4989 				}
4990 
4991 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
4992 			} else {
4993 				gain_diff_p->gain_diff_chain[i] = 0;
4994 			}
4995 		}
4996 
4997 		if (!gain_diff_p->gain_diff_send) {
4998 			gain_diff_p->gain_diff_send = 1;
4999 
5000 			(void) memset(&cmd, 0, sizeof (cmd));
5001 
5002 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5003 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
5004 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
5005 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
5006 
5007 			/*
5008 			 * send out PHY calibration command to
5009 			 * adjust every chain's Rx gain
5010 			 */
5011 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5012 			    &cmd, sizeof (cmd), 1);
5013 			if (rv) {
5014 				return (rv);
5015 			}
5016 
5017 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
5018 		}
5019 
5020 		gain_diff_p->beacon_stren_a = 0;
5021 		gain_diff_p->beacon_stren_b = 0;
5022 		gain_diff_p->beacon_stren_c = 0;
5023 
5024 		gain_diff_p->noise_stren_a = 0;
5025 		gain_diff_p->noise_stren_b = 0;
5026 		gain_diff_p->noise_stren_c = 0;
5027 	}
5028 
5029 	return (IWK_SUCCESS);
5030 }
5031 
5032 /* Make necessary preparation for Receiver sensitivity calibration */
5033 static int iwk_rx_sens_init(iwk_sc_t *sc)
5034 {
5035 	int i, rv;
5036 	struct iwk_rx_sensitivity_cmd cmd;
5037 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5038 
5039 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5040 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5041 
5042 	rx_sens_p->auto_corr_ofdm_x4 = 90;
5043 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5044 	rx_sens_p->auto_corr_ofdm_x1 = 105;
5045 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5046 
5047 	rx_sens_p->auto_corr_cck_x4 = 125;
5048 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5049 	rx_sens_p->min_energy_det_cck = 100;
5050 
5051 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5052 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5053 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5054 
5055 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5056 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5057 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
5058 	rx_sens_p->last_false_alarm_cnt_cck = 0;
5059 
5060 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5061 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5062 	rx_sens_p->cck_no_false_alarm_num = 0;
5063 	rx_sens_p->cck_beacon_idx = 0;
5064 
5065 	for (i = 0; i < 10; i++) {
5066 		rx_sens_p->cck_beacon_min[i] = 0;
5067 	}
5068 
5069 	rx_sens_p->cck_noise_idx = 0;
5070 	rx_sens_p->cck_noise_ref = 0;
5071 
5072 	for (i = 0; i < 20; i++) {
5073 		rx_sens_p->cck_noise_max[i] = 0;
5074 	}
5075 
5076 	rx_sens_p->cck_noise_diff = 0;
5077 	rx_sens_p->cck_no_false_alarm_num = 0;
5078 
5079 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5080 
5081 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5082 	    rx_sens_p->auto_corr_ofdm_x4;
5083 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5084 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5085 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5086 	    rx_sens_p->auto_corr_ofdm_x1;
5087 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5088 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5089 
5090 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5091 	    rx_sens_p->auto_corr_cck_x4;
5092 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5093 	    rx_sens_p->auto_corr_mrc_cck_x4;
5094 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
5095 
5096 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5097 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5098 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5099 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5100 
5101 	/* at first, set up Rx to maximum sensitivity */
5102 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5103 	if (rv) {
5104 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5105 		    "in the process of initialization, "
5106 		    "failed to send rx sensitivity command\n");
5107 		return (rv);
5108 	}
5109 
5110 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5111 
5112 	return (IWK_SUCCESS);
5113 }
5114 
5115 /*
5116  * make Receiver sensitivity calibration to adjust every chain's Rx sensitivity.
5117  * for more information, please refer to the iwk_calibration.h file
5118  */
5119 static int iwk_rx_sens(iwk_sc_t *sc)
5120 {
5121 	int rv;
5122 	uint32_t actual_rx_time;
5123 	struct statistics_rx_non_phy *rx_general_p =
5124 	    &sc->sc_statistics.rx.general;
5125 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5126 	struct iwk_rx_sensitivity_cmd cmd;
5127 
5128 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5129 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5130 		    "sensitivity initialization has not finished.\n");
5131 		return (DDI_FAILURE);
5132 	}
5133 
5134 	if (INTERFERENCE_DATA_AVAILABLE !=
5135 	    rx_general_p->interference_data_flag) {
5136 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5137 		    "can't make rx sensitivity calibration,"
5138 		    " because of invalid statistics\n");
5139 		return (DDI_FAILURE);
5140 	}
5141 
5142 	actual_rx_time = rx_general_p->channel_load;
5143 	if (!actual_rx_time) {
5144 		IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5145 		    "can't make rx sensitivity calibration,"
5146 		    " because there is not enough rx time\n"));
5147 		return (DDI_FAILURE);
5148 	}
5149 
5150 	/* make Rx sensitivity calibration for OFDM mode */
5151 	rv = iwk_ofdm_sens(sc, actual_rx_time);
5152 	if (rv) {
5153 		return (rv);
5154 	}
5155 
5156 	/* make Rx sensitivity calibration for CCK mode */
5157 	rv = iwk_cck_sens(sc, actual_rx_time);
5158 	if (rv) {
5159 		return (rv);
5160 	}
5161 
5162 	/*
5163 	 * if the false alarm counters have not changed, nothing is done
5164 	 */
5165 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5166 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5167 		return (IWK_SUCCESS);
5168 	}
5169 
5170 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5171 
5172 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5173 	    rx_sens_p->auto_corr_ofdm_x4;
5174 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5175 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5176 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5177 	    rx_sens_p->auto_corr_ofdm_x1;
5178 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5179 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5180 
5181 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5182 	    rx_sens_p->auto_corr_cck_x4;
5183 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5184 	    rx_sens_p->auto_corr_mrc_cck_x4;
5185 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5186 	    rx_sens_p->min_energy_det_cck;
5187 
5188 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5189 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5190 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5191 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5192 
5193 	/*
5194 	 * send sensitivity command to complete actual sensitivity calibration
5195 	 */
5196 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5197 	if (rv) {
5198 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5199 		    "failed to send rx sensitivity command\n");
5200 		return (rv);
5201 	}
5202 
5203 	return (IWK_SUCCESS);
5204 
5205 }
5206 
5207 /*
5208  * make Rx sensitivity calibration for CCK mode.
5209  * This prepares the parameters for the sensitivity command
5210  */
5211 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5212 {
5213 	int i;
5214 	uint8_t noise_a, noise_b, noise_c;
5215 	uint8_t max_noise_abc, max_noise_20;
5216 	uint32_t beacon_a, beacon_b, beacon_c;
5217 	uint32_t min_beacon_abc, max_beacon_10;
5218 	uint32_t cck_fa, cck_bp;
5219 	uint32_t cck_sum_fa_bp;
5220 	uint32_t temp;
5221 	struct statistics_rx_non_phy *rx_general_p =
5222 	    &sc->sc_statistics.rx.general;
5223 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5224 
5225 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5226 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5227 
5228 	/* accumulate false alarm */
5229 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5230 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5231 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5232 		cck_fa += (0xFFFFFFFF - temp);
5233 	} else {
5234 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5235 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5236 	}
5237 
5238 	/* accumulate bad plcp */
5239 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5240 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5241 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5242 		cck_bp += (0xFFFFFFFF - temp);
5243 	} else {
5244 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5245 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5246 	}
5247 
5248 	/*
5249 	 * calculate relative value
5250 	 */
5251 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5252 	rx_sens_p->cck_noise_diff = 0;
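	/*
	 * cck_sum_fa_bp is compared below against thresholds proportional
	 * to the actual receive time: above 50 * actual_rx_time means too
	 * many false alarms (desensitize the receiver), below
	 * 5 * actual_rx_time means too few (increase sensitivity), and
	 * anything in between is the "good" range.
	 */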
5253 
5254 	noise_a =
5255 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5256 	noise_b =
5257 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5258 	noise_c =
5259 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5260 
5261 	beacon_a = rx_general_p->beacon_energy_a;
5262 	beacon_b = rx_general_p->beacon_energy_b;
5263 	beacon_c = rx_general_p->beacon_energy_c;
5264 
5265 	/* determine maximum noise among 3 chains */
5266 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5267 		max_noise_abc = noise_a;
5268 	} else if (noise_b >= noise_c) {
5269 		max_noise_abc = noise_b;
5270 	} else {
5271 		max_noise_abc = noise_c;
5272 	}
5273 
5274 	/* record maximum noise among 3 chains */
5275 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5276 	rx_sens_p->cck_noise_idx++;
5277 	if (rx_sens_p->cck_noise_idx >= 20) {
5278 		rx_sens_p->cck_noise_idx = 0;
5279 	}
5280 
5281 	/* determine maximum noise among 20 max noise */
5282 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5283 	for (i = 0; i < 20; i++) {
5284 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5285 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5286 		}
5287 	}
5288 
5289 	/* determine minimum beacon among 3 chains */
5290 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5291 		min_beacon_abc = beacon_a;
5292 	} else if (beacon_b <= beacon_c) {
5293 		min_beacon_abc = beacon_b;
5294 	} else {
5295 		min_beacon_abc = beacon_c;
5296 	}
5297 
5298 	/* record minimum beacon among 3 chains */
5299 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5300 	rx_sens_p->cck_beacon_idx++;
5301 	if (rx_sens_p->cck_beacon_idx >= 10) {
5302 		rx_sens_p->cck_beacon_idx = 0;
5303 	}
5304 
5305 	/* determine the maximum of the 10 recorded minimum beacon values */
5306 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5307 	for (i = 0; i < 10; i++) {
5308 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5309 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5310 		}
5311 	}
5312 
5313 	/* add a little margin */
5314 	max_beacon_10 += 6;
5315 
5316 	/* record the count of having no false alarms */
5317 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5318 		rx_sens_p->cck_no_false_alarm_num++;
5319 	} else {
5320 		rx_sens_p->cck_no_false_alarm_num = 0;
5321 	}
5322 
5323 	/*
5324 	 * adjust parameters in sensitivity command
5325 	 * according to different status.
5326 	 * for more information, please refer to the iwk_calibration.h file
5327 	 */
5328 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5329 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5330 
5331 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5332 			rx_sens_p->cck_noise_ref = max_noise_20;
5333 
5334 			if (rx_sens_p->min_energy_det_cck > 2) {
5335 				rx_sens_p->min_energy_det_cck -= 2;
5336 			}
5337 		}
5338 
5339 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5340 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5341 		} else {
5342 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5343 				rx_sens_p->auto_corr_cck_x4 += 3;
5344 			} else {
5345 				rx_sens_p->auto_corr_cck_x4 = 200;
5346 			}
5347 		}
5348 
5349 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5350 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5351 		} else {
5352 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5353 		}
5354 
5355 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5356 
5357 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5358 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5359 
5360 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5361 		    (int32_t)max_noise_20;
5362 
5363 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5364 		    ((rx_sens_p->cck_noise_diff > 2) ||
5365 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5366 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5367 				rx_sens_p->min_energy_det_cck += 2;
5368 			} else {
5369 				rx_sens_p->min_energy_det_cck = 97;
5370 			}
5371 
5372 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5373 				rx_sens_p->auto_corr_cck_x4 -= 3;
5374 			} else {
5375 				rx_sens_p->auto_corr_cck_x4 = 125;
5376 			}
5377 
5378 			if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) {
5379 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5380 			} else {
5381 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5382 			}
5383 
5384 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5385 		} else {
5386 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5387 		}
5388 	} else {
5389 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5390 
5391 		rx_sens_p->cck_noise_ref = max_noise_20;
5392 
5393 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5394 			rx_sens_p->min_energy_det_cck -= 8;
5395 		}
5396 
5397 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5398 	}
5399 
5400 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5401 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5402 	}
5403 
5404 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5405 
5406 	return (IWK_SUCCESS);
5407 }
5408 
5409 /*
5410  * make Rx sensitivity calibration for OFDM mode.
5411  * This prepares the parameters for the sensitivity command
5412  */
5413 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5414 {
5415 	uint32_t temp;
5416 	uint16_t temp1;
5417 	uint32_t ofdm_fa, ofdm_bp;
5418 	uint32_t ofdm_sum_fa_bp;
5419 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5420 
5421 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5422 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5423 
5424 	/* accumulate false alarm */
5425 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5426 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5427 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5428 		ofdm_fa += (0xFFFFFFFF - temp);
5429 	} else {
5430 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5431 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5432 	}
5433 
5434 	/* accumulate bad plcp */
5435 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5436 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5437 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5438 		ofdm_bp += (0xFFFFFFFF - temp);
5439 	} else {
5440 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5441 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5442 	}
5443 
5444 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5445 
5446 	/*
5447 	 * adjust parameter in sensitivity command according to different status
5448 	 */
5449 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5450 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5451 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5452 
5453 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5454 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5455 		    (temp1 <= 210) ? temp1 : 210;
5456 
5457 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5458 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5459 
5460 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5461 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5462 		    (temp1 <= 270) ? temp1 : 270;
5463 
5464 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5465 
5466 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5467 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5468 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5469 
5470 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5471 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5472 		    (temp1 >= 170) ? temp1 : 170;
5473 
5474 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5475 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5476 
5477 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5478 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5479 		    (temp1 >= 220) ? temp1 : 220;
5480 
5481 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5482 
5483 	} else {
5484 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5485 	}
5486 
5487 	return (IWK_SUCCESS);
5488 }
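/*
 * Editorial sketch (not part of the original driver): the false alarm and
 * bad-PLCP statistics used above are free-running 32-bit uCode counters, so
 * the per-interval delta has to tolerate the counter wrapping past
 * 0xFFFFFFFF.  The helper below restates that accumulation pattern in
 * isolation; its name is hypothetical and the driver does not call it.
 */
static uint32_t iwk_sketch_counter_delta(uint32_t *last, uint32_t curr)
{
	uint32_t delta;

	if (*last > curr) {
		/* the counter wrapped since the previous statistics report */
		delta = curr + (0xFFFFFFFF - *last);
	} else {
		delta = curr - *last;
	}
	*last = curr;	/* remember the raw counter for the next interval */

	return (delta);
}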
5489 
5490 /*
5491  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5492  *     a 256-entry history of uCode execution within a circular buffer.
5493  *     Its header format is:
5494  *
5495  *	uint32_t log_size;	log capacity (in number of entries)
5496  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5497  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5498  *	uint32_t write_index;	next circular buffer entry that uCode would fill
5499  *
5500  *     The header is followed by the circular buffer of log entries.  Entries
5501  *     with timestamps have the following format:
5502  *
5503  *	uint32_t event_id;     range 0 - 1500
5504  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5505  *	uint32_t data;         event_id-specific data value
5506  *
5507  *     Entries without timestamps contain only event_id and data.
5508  */
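/*
 * Editorial sketch (not part of the original driver): the layout described
 * above, pictured as structures.  The type names are hypothetical and exist
 * only for illustration; iwk_write_event_log() below reads these fields one
 * 32-bit word at a time with iwk_mem_read().
 */
typedef struct iwk_sketch_event_log_hdr {
	uint32_t log_size;	/* log capacity, in entries */
	uint32_t type;		/* 1: entries carry a timestamp, 0: no */
	uint32_t wraps;		/* times the circular buffer has wrapped */
	uint32_t write_index;	/* next entry the uCode will fill */
} iwk_sketch_event_log_hdr_t;

typedef struct iwk_sketch_event_log_entry {
	uint32_t event_id;	/* range 0 - 1500 */
	uint32_t timestamp;	/* present only when the header type is 1 */
	uint32_t data;		/* event_id-specific data value */
} iwk_sketch_event_log_entry_t;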
5509 
5510 /*
5511  * iwk_write_event_log - Write event log to dmesg
5512  */
5513 static void iwk_write_event_log(iwk_sc_t *sc)
5514 {
5515 	uint32_t log_event_table_ptr;	/* Start address of event table */
5516 	uint32_t startptr;	/* Start address of log data */
5517 	uint32_t logptr;	/* address of log data entry */
5518 	uint32_t i, n, num_events;
5519 	uint32_t event_id, data1, data2; /* log data */
5520 
5521 	uint32_t log_size;   /* log capacity (in number of entries) */
5522 	uint32_t type;	/* (1) timestamp with each entry, (0) no timestamp */
5523 	uint32_t wraps;	/* # times uCode has wrapped to */
5524 			/* the top of circular buffer */
5525 	uint32_t idx; /* index of entry to be filled in next */
5526 
5527 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5528 	if (!(log_event_table_ptr)) {
5529 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5530 		return;
5531 	}
5532 
5533 	iwk_mac_access_enter(sc);
5534 
5535 	/* Read log header */
5536 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5537 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5538 	type = iwk_mem_read(sc, log_event_table_ptr);
5539 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5540 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5541 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5542 	idx = iwk_mem_read(sc, log_event_table_ptr);
5543 	startptr = log_event_table_ptr +
5544 	    sizeof (uint32_t); /* addr of start of log data */
5545 	if (!log_size && !wraps) {
5546 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5547 		iwk_mac_access_exit(sc);
5548 		return;
5549 	}
5550 
5551 	if (!wraps) {
5552 		num_events = idx;
5553 		logptr = startptr;
5554 	} else {
5555 		num_events = log_size - idx;
5556 		n = type ? 3 : 2; /* words per entry: 3 if timestamped */
5557 		logptr = startptr + (idx * n * sizeof (uint32_t));
5558 	}
5559 
5560 	for (i = 0; i < num_events; i++) {
5561 		event_id = iwk_mem_read(sc, logptr);
5562 		logptr += sizeof (uint32_t);
5563 		data1 = iwk_mem_read(sc, logptr);
5564 		logptr += sizeof (uint32_t);
5565 		if (type == 0) { /* no timestamp */
5566 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x",
5567 			    event_id, data1));
5568 		} else { /* timestamp */
5569 			data2 = iwk_mem_read(sc, logptr);
5570 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5571 			    data1, event_id, data2);
5572 			IWK_DBG((IWK_DEBUG_EEPROM,
5573 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5574 			    data1, event_id, data2));
5575 			logptr += sizeof (uint32_t);
5576 		}
5577 	}
5578 
5579 	/*
5580 	 * Print the wrapped around entries, if any
5581 	 */
5582 	if (wraps) {
5583 		logptr = startptr;
5584 		for (i = 0; i < idx; i++) {
5585 			event_id = iwk_mem_read(sc, logptr);
5586 			logptr += sizeof (uint32_t);
5587 			data1 = iwk_mem_read(sc, logptr);
5588 			logptr += sizeof (uint32_t);
5589 			if (type == 0) { /* no timestamp */
5590 				IWK_DBG((IWK_DEBUG_EEPROM,
5591 				    "Event ID=%d, Data=0x%x", event_id, data1));
5592 			} else { /* timestamp */
5593 				data2 = iwk_mem_read(sc, logptr);
5594 				IWK_DBG((IWK_DEBUG_EEPROM,
5595 				    "Time=%d, Event ID=%d, Data=0x%x\n",
5596 				    data1, event_id, data2));
5597 				logptr += sizeof (uint32_t);
5598 			}
5599 		}
5600 	}
5601 
5602 	iwk_mac_access_exit(sc);
5603 }
5604 
5605 /*
5606  * error_event_table_ptr indicates base of the error log.  This contains
5607  * information about any uCode error that occurs.  For 4965, the format is:
5608  *
5609  * uint32_t valid;        (nonzero) valid, (0) log is empty
5610  * uint32_t error_id;     type of error
5611  * uint32_t pc;           program counter
5612  * uint32_t blink1;       branch link
5613  * uint32_t blink2;       branch link
5614  * uint32_t ilink1;       interrupt link
5615  * uint32_t ilink2;       interrupt link
5616  * uint32_t data1;        error-specific data
5617  * uint32_t data2;        error-specific data
5618  * uint32_t line;         source code line of error
5619  * uint32_t bcon_time;    beacon timer
5620  * uint32_t tsf_low;      timing synchronization function (TSF), low 32 bits
5621  * uint32_t tsf_hi;       timing synchronization function (TSF), high 32 bits
5622  */
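/*
 * Editorial sketch (not part of the original driver): the error log layout
 * described above, pictured as a structure.  The type name is hypothetical;
 * iwk_write_error_log() below walks the same fields one 32-bit word at a
 * time with iwk_mem_read().
 */
typedef struct iwk_sketch_error_log {
	uint32_t valid;		/* nonzero: valid, 0: log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t pc;		/* program counter */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t line;		/* source code line of error */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* TSF timer, low 32 bits */
	uint32_t tsf_hi;	/* TSF timer, high 32 bits */
} iwk_sketch_error_log_t;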
5623 /*
5624  * iwk_write_error_log - Write error log to dmesg
5625  */
5626 static void iwk_write_error_log(iwk_sc_t *sc)
5627 {
5628 	uint32_t err_ptr;	/* Start address of error log */
5629 	uint32_t valid;		/* is error log valid */
5630 
5631 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
5632 	if (!(err_ptr)) {
5633 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
5634 		return;
5635 	}
5636 
5637 	iwk_mac_access_enter(sc);
5638 
5639 	valid = iwk_mem_read(sc, err_ptr);
5640 	if (!(valid)) {
5641 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
5642 		iwk_mac_access_exit(sc);
5643 		return;
5644 	}
5645 	err_ptr += sizeof (uint32_t);
5646 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
5647 	err_ptr += sizeof (uint32_t);
5648 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
5649 	err_ptr += sizeof (uint32_t);
5650 	IWK_DBG((IWK_DEBUG_EEPROM,
5651 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5652 	err_ptr += sizeof (uint32_t);
5653 	IWK_DBG((IWK_DEBUG_EEPROM,
5654 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5655 	err_ptr += sizeof (uint32_t);
5656 	IWK_DBG((IWK_DEBUG_EEPROM,
5657 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5658 	err_ptr += sizeof (uint32_t);
5659 	IWK_DBG((IWK_DEBUG_EEPROM,
5660 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5661 	err_ptr += sizeof (uint32_t);
5662 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
5663 	err_ptr += sizeof (uint32_t);
5664 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
5665 	err_ptr += sizeof (uint32_t);
5666 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
5667 	err_ptr += sizeof (uint32_t);
5668 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
5669 	err_ptr += sizeof (uint32_t);
5670 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
5671 	err_ptr += sizeof (uint32_t);
5672 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
5673 
5674 	iwk_mac_access_exit(sc);
5675 }
5676