xref: /titanic_41/usr/src/uts/common/io/iwk/iwk2.c (revision 186f7fbf5e07d046b50e4e15c32b21f109b76c80)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN (Kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
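/*
 * IWK_DBG() takes a doubly-parenthesized argument list, e.g.
 * IWK_DBG((IWK_DEBUG_DMA, "...", ...)), so that the whole call
 * expands to nothing when DEBUG is not defined.
 */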
88 
89 static void	*iwk_soft_state_p = NULL;
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
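/*
 * The 4965 microcode is linked into the driver as a byte array;
 * iwk_attach() later interprets it through an iwk_firmware_hdr_t
 * (sc->sc_hdr) and iwk_alloc_fw_dma() copies each section into
 * DMA-able memory.
 */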
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * The hardware supports up to 4 DMA segments per descriptor, but
177  * all the wifi drivers currently put the whole frame in a single
178  * descriptor, so the maximum number of segments is set to 1, the
179  * same as for the rx buffer. We may leverage the hardware's
180  * capability in the future, which is why rx and tx do not share
181  * one buffer_dma_attr.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
197 
198 /* DMA attributes for the text and data parts of the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 
312 static void	iwk_write_event_log(iwk_sc_t *);
313 static void	iwk_write_error_log(iwk_sc_t *);
314 
315 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
316 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
317 
318 /*
319  * GLD specific operations
320  */
321 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
322 static int	iwk_m_start(void *arg);
323 static void	iwk_m_stop(void *arg);
324 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
325 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
326 static int	iwk_m_promisc(void *arg, boolean_t on);
327 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
328 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
329 static int	iwk_m_setprop(void *arg, const char *pr_name,
330 	mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
331 static int	iwk_m_getprop(void *arg, const char *pr_name,
332 	mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
333 	void *wldp_buf);
334 static void	iwk_destroy_locks(iwk_sc_t *sc);
335 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
336 static void	iwk_thread(iwk_sc_t *sc);
337 
338 /*
339  * Supported rates for 802.11b/g modes (in 500 kbps units).
340  * 11a and 11n support will be added later.
341  */
342 static const struct ieee80211_rateset iwk_rateset_11b =
343 	{ 4, { 2, 4, 11, 22 } };
344 
345 static const struct ieee80211_rateset iwk_rateset_11g =
346 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
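/*
 * The rate codes above are in 500 kbps units: 2 = 1 Mbps, 4 = 2 Mbps,
 * 11 = 5.5 Mbps, 22 = 11 Mbps, 12 = 6 Mbps, ..., 108 = 54 Mbps.
 */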
347 
348 /*
349  * For mfthread only
350  */
351 extern pri_t minclsyspri;
352 
353 #define	DRV_NAME_4965	"iwk"
354 
355 /*
356  * Module Loading Data & Entry Points
357  */
358 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
359     iwk_detach, nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
360 
361 static struct modldrv iwk_modldrv = {
362 	&mod_driverops,
363 	"Intel(R) 4965AGN driver(N)",
364 	&iwk_devops
365 };
366 
367 static struct modlinkage iwk_modlinkage = {
368 	MODREV_1,
369 	&iwk_modldrv,
370 	NULL
371 };
372 
373 int
374 _init(void)
375 {
376 	int	status;
377 
378 	status = ddi_soft_state_init(&iwk_soft_state_p,
379 	    sizeof (iwk_sc_t), 1);
380 	if (status != DDI_SUCCESS)
381 		return (status);
382 
383 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
384 	status = mod_install(&iwk_modlinkage);
385 	if (status != DDI_SUCCESS) {
386 		mac_fini_ops(&iwk_devops);
387 		ddi_soft_state_fini(&iwk_soft_state_p);
388 	}
389 
390 	return (status);
391 }
392 
393 int
394 _fini(void)
395 {
396 	int status;
397 
398 	status = mod_remove(&iwk_modlinkage);
399 	if (status == DDI_SUCCESS) {
400 		mac_fini_ops(&iwk_devops);
401 		ddi_soft_state_fini(&iwk_soft_state_p);
402 	}
403 
404 	return (status);
405 }
406 
407 int
408 _info(struct modinfo *mip)
409 {
410 	return (mod_info(&iwk_modlinkage, mip));
411 }
412 
413 /*
414  * Mac Call Back entries
415  */
416 mac_callbacks_t	iwk_m_callbacks = {
417 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
418 	iwk_m_stat,
419 	iwk_m_start,
420 	iwk_m_stop,
421 	iwk_m_promisc,
422 	iwk_m_multicst,
423 	iwk_m_unicst,
424 	iwk_m_tx,
425 	NULL,
426 	iwk_m_ioctl,
427 	NULL,
428 	NULL,
429 	NULL,
430 	iwk_m_setprop,
431 	iwk_m_getprop
432 };
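/*
 * The NULL entries above are optional GLDv3 callbacks that this driver
 * does not implement; the MC_IOCTL, MC_SETPROP and MC_GETPROP flags in
 * the first field advertise the optional entry points that are provided.
 */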
433 
434 #ifdef DEBUG
435 void
436 iwk_dbg(uint32_t flags, const char *fmt, ...)
437 {
438 	va_list	ap;
439 
440 	if (flags & iwk_dbg_flags) {
441 		va_start(ap, fmt);
442 		vcmn_err(CE_NOTE, fmt, ap);
443 		va_end(ap);
444 	}
445 }
446 #endif
447 
448 /*
449  * device operations
450  */
451 int
452 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
453 {
454 	iwk_sc_t		*sc;
455 	ieee80211com_t	*ic;
456 	int			instance, err, i;
457 	char			strbuf[32];
458 	wifi_data_t		wd = { 0 };
459 	mac_register_t		*macp;
460 
461 	int			intr_type;
462 	int			intr_count;
463 	int			intr_actual;
464 
465 	switch (cmd) {
466 	case DDI_ATTACH:
467 		break;
468 	case DDI_RESUME:
469 		sc = ddi_get_soft_state(iwk_soft_state_p,
470 		    ddi_get_instance(dip));
471 		ASSERT(sc != NULL);
472 		mutex_enter(&sc->sc_glock);
473 		sc->sc_flags &= ~IWK_F_SUSPEND;
474 		mutex_exit(&sc->sc_glock);
475 		if (sc->sc_flags & IWK_F_RUNNING) {
476 			(void) iwk_init(sc);
477 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
478 		}
479 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
480 		return (DDI_SUCCESS);
481 	default:
482 		err = DDI_FAILURE;
483 		goto attach_fail1;
484 	}
485 
486 	instance = ddi_get_instance(dip);
487 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
488 	if (err != DDI_SUCCESS) {
489 		cmn_err(CE_WARN,
490 		    "iwk_attach(): failed to allocate soft state\n");
491 		goto attach_fail1;
492 	}
493 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
494 	sc->sc_dip = dip;
495 
496 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
497 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
498 	if (err != DDI_SUCCESS) {
499 		cmn_err(CE_WARN,
500 		    "iwk_attach(): failed to map config spaces regs\n");
501 		goto attach_fail2;
502 	}
503 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
504 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
505 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
506 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
507 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
508 	if (!sc->sc_clsz)
509 		sc->sc_clsz = 16;
510 	sc->sc_clsz = (sc->sc_clsz << 2);
511 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
512 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
513 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
514 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
515 	/*
516 	 * Map operating registers
517 	 */
518 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
519 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
520 	if (err != DDI_SUCCESS) {
521 		cmn_err(CE_WARN,
522 		    "iwk_attach(): failed to map device regs\n");
523 		goto attach_fail2a;
524 	}
525 
526 	err = ddi_intr_get_supported_types(dip, &intr_type);
527 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
528 		cmn_err(CE_WARN, "iwk_attach(): "
529 		    "Fixed type interrupt is not supported\n");
530 		goto attach_fail_intr_a;
531 	}
532 
533 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
534 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
535 		cmn_err(CE_WARN, "iwk_attach(): "
536 		    "No fixed interrupts\n");
537 		goto attach_fail_intr_a;
538 	}
539 
540 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
541 
542 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
543 	    intr_count, &intr_actual, 0);
544 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
545 		cmn_err(CE_WARN, "iwk_attach(): "
546 		    "ddi_intr_alloc() failed 0x%x\n", err);
547 		goto attach_fail_intr_b;
548 	}
549 
550 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
551 	if (err != DDI_SUCCESS) {
552 		cmn_err(CE_WARN, "iwk_attach(): "
553 		    "ddi_intr_get_pri() failed 0x%x\n", err);
554 		goto attach_fail_intr_c;
555 	}
556 
557 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
558 	    DDI_INTR_PRI(sc->sc_intr_pri));
559 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
560 	    DDI_INTR_PRI(sc->sc_intr_pri));
561 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
562 	    DDI_INTR_PRI(sc->sc_intr_pri));
563 
564 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
565 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
566 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
567 	/*
568 	 * initialize the mfthread
569 	 */
570 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
571 	sc->sc_mf_thread = NULL;
572 	sc->sc_mf_thread_switch = 0;
573 
574 	/*
575 	 * Allocate shared page.
576 	 */
577 	err = iwk_alloc_shared(sc);
578 	if (err != DDI_SUCCESS) {
579 		cmn_err(CE_WARN, "iwk_attach(): "
580 		    "failed to allocate shared page\n");
581 		goto attach_fail3;
582 	}
583 
584 	/*
585 	 * Allocate keep warm page.
586 	 */
587 	err = iwk_alloc_kw(sc);
588 	if (err != DDI_SUCCESS) {
589 		cmn_err(CE_WARN, "iwk_attach(): "
590 		    "failed to allocate keep warm page\n");
591 		goto attach_fail3a;
592 	}
593 
594 	/*
595 	 * Do some necessary hardware initializations.
596 	 */
597 	err = iwk_preinit(sc);
598 	if (err != DDI_SUCCESS) {
599 		cmn_err(CE_WARN, "iwk_attach(): "
600 		    "failed to init hardware\n");
601 		goto attach_fail4;
602 	}
603 
604 	/* initialize EEPROM */
605 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
606 	if (err != 0) {
607 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
608 		goto attach_fail4;
609 	}
610 
611 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
612 		IWK_DBG((IWK_DEBUG_EEPROM, "older EEPROM detected"));
613 		goto attach_fail4;
614 	}
615 
616 	iwk_get_mac_from_eep(sc);
617 
618 	err = iwk_ring_init(sc);
619 	if (err != DDI_SUCCESS) {
620 		cmn_err(CE_WARN, "iwk_attach(): "
621 		    "failed to allocate and initialize ring\n");
622 		goto attach_fail4;
623 	}
624 
625 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
626 
627 	err = iwk_alloc_fw_dma(sc);
628 	if (err != DDI_SUCCESS) {
629 		cmn_err(CE_WARN, "iwk_attach(): "
630 		    "failed to allocate firmware dma\n");
631 		goto attach_fail5;
632 	}
633 
634 	/*
635 	 * Initialize the wifi part, which will be used by
636 	 * generic layer
637 	 */
638 	ic = &sc->sc_ic;
639 	ic->ic_phytype  = IEEE80211_T_OFDM;
640 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
641 	ic->ic_state    = IEEE80211_S_INIT;
642 	ic->ic_maxrssi  = 100; /* experimental number */
643 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
644 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
645 	/*
646 	 * use software for WEP and TKIP; hardware handles CCMP
647 	 */
648 	ic->ic_caps |= IEEE80211_C_AES_CCM;
649 	/*
650 	 * Support WPA/WPA2
651 	 */
652 	ic->ic_caps |= IEEE80211_C_WPA;
653 
654 	/* set supported .11b and .11g rates */
655 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
656 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
657 
658 	/* set supported .11b and .11g channels (1 through 14) */
659 	for (i = 1; i <= 14; i++) {
660 		ic->ic_sup_channels[i].ich_freq =
661 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
662 		ic->ic_sup_channels[i].ich_flags =
663 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
664 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
665 	}
666 
667 	ic->ic_xmit = iwk_send;
668 	/*
669 	 * init Wifi layer
670 	 */
671 	ieee80211_attach(ic);
672 
673 	/*
674 	 * each instance has its own WPA door
675 	 */
676 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
677 	    ddi_driver_name(dip),
678 	    ddi_get_instance(dip));
679 
680 	/*
681 	 * Override 80211 default routines
682 	 */
683 	sc->sc_newstate = ic->ic_newstate;
684 	ic->ic_newstate = iwk_newstate;
685 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
686 	ic->ic_node_alloc = iwk_node_alloc;
687 	ic->ic_node_free = iwk_node_free;
688 	ic->ic_crypto.cs_key_set = iwk_key_set;
689 	ieee80211_media_init(ic);
690 	/*
691 	 * initialize default tx key
692 	 */
693 	ic->ic_def_txkey = 0;
694 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
695 	    iwk_rx_softintr, (caddr_t)sc);
696 	if (err != DDI_SUCCESS) {
697 		cmn_err(CE_WARN, "iwk_attach(): "
698 		    "add soft interrupt failed\n");
699 		goto attach_fail7;
700 	}
701 
702 	/*
703 	 * Add the interrupt handler
704 	 */
705 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
706 	    (caddr_t)sc, NULL);
707 	if (err != DDI_SUCCESS) {
708 		cmn_err(CE_WARN, "iwk_attach(): "
709 		    "ddi_intr_add_handle() failed\n");
710 		goto attach_fail8;
711 	}
712 
713 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
714 	if (err != DDI_SUCCESS) {
715 		cmn_err(CE_WARN, "iwk_attach(): "
716 		    "ddi_intr_enable() failed\n");
717 		goto attach_fail_intr_d;
718 	}
719 
720 	/*
721 	 * Initialize the WiFi plugin data (wifi_data_t) passed to the MAC layer
722 	 */
723 	wd.wd_secalloc = WIFI_SEC_NONE;
724 	wd.wd_opmode = ic->ic_opmode;
725 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
726 
727 	macp = mac_alloc(MAC_VERSION);
728 	if (macp == NULL) {
729 		cmn_err(CE_WARN,
730 		    "iwk_attach(): failed to do mac_alloc()\n");
		err = DDI_FAILURE;
731 		goto attach_fail9;
732 	}
733 
734 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
735 	macp->m_driver		= sc;
736 	macp->m_dip		= dip;
737 	macp->m_src_addr	= ic->ic_macaddr;
738 	macp->m_callbacks	= &iwk_m_callbacks;
739 	macp->m_min_sdu		= 0;
740 	macp->m_max_sdu		= IEEE80211_MTU;
741 	macp->m_pdata		= &wd;
742 	macp->m_pdata_size	= sizeof (wd);
743 
744 	/*
745 	 * Register the macp to mac
746 	 */
747 	err = mac_register(macp, &ic->ic_mach);
748 	mac_free(macp);
749 	if (err != DDI_SUCCESS) {
750 		cmn_err(CE_WARN,
751 		    "iwk_attach(): failed to do mac_register()\n");
752 		goto attach_fail9;
753 	}
754 
755 	/*
756 	 * Create minor node of type DDI_NT_NET_WIFI
757 	 */
758 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
759 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
760 	    instance + 1, DDI_NT_NET_WIFI, 0);
761 	if (err != DDI_SUCCESS)
762 		cmn_err(CE_WARN,
763 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
764 
765 	/*
766 	 * Notify link is down now
767 	 */
768 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
769 
770 	/*
771 	 * create the mf thread to handle link status updates,
772 	 * recover from fatal errors, etc.
773 	 */
774 	sc->sc_mf_thread_switch = 1;
775 	if (sc->sc_mf_thread == NULL)
776 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
777 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
778 
779 	sc->sc_flags |= IWK_F_ATTACHED;
780 
781 	return (DDI_SUCCESS);
782 attach_fail9:
783 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
784 attach_fail_intr_d:
785 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
786 
787 attach_fail8:
788 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
789 	sc->sc_soft_hdl = NULL;
790 attach_fail7:
791 	ieee80211_detach(ic);
792 attach_fail6:
793 	iwk_free_fw_dma(sc);
794 attach_fail5:
795 	iwk_ring_free(sc);
796 attach_fail4:
797 	iwk_free_kw(sc);
798 attach_fail3a:
799 	iwk_free_shared(sc);
800 attach_fail3:
801 	iwk_destroy_locks(sc);
802 attach_fail_intr_c:
803 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
804 attach_fail_intr_b:
805 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
806 attach_fail_intr_a:
807 	ddi_regs_map_free(&sc->sc_handle);
808 attach_fail2a:
809 	ddi_regs_map_free(&sc->sc_cfg_handle);
810 attach_fail2:
811 	ddi_soft_state_free(iwk_soft_state_p, instance);
812 attach_fail1:
813 	return (err);
814 }
815 
816 int
817 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
818 {
819 	iwk_sc_t	*sc;
820 	int err;
821 
822 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
823 	ASSERT(sc != NULL);
824 
825 	switch (cmd) {
826 	case DDI_DETACH:
827 		break;
828 	case DDI_SUSPEND:
829 		if (sc->sc_flags & IWK_F_RUNNING) {
830 			iwk_stop(sc);
831 		}
832 		mutex_enter(&sc->sc_glock);
833 		sc->sc_flags |= IWK_F_SUSPEND;
834 		mutex_exit(&sc->sc_glock);
835 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
836 		return (DDI_SUCCESS);
837 	default:
838 		return (DDI_FAILURE);
839 	}
840 
841 	if (!(sc->sc_flags & IWK_F_ATTACHED))
842 		return (DDI_FAILURE);
843 
844 	err = mac_disable(sc->sc_ic.ic_mach);
845 	if (err != DDI_SUCCESS)
846 		return (err);
847 
848 	/*
849 	 * Destroy the mf_thread
850 	 */
851 	mutex_enter(&sc->sc_mt_lock);
852 	sc->sc_mf_thread_switch = 0;
853 	while (sc->sc_mf_thread != NULL) {
854 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
855 			break;
856 	}
857 	mutex_exit(&sc->sc_mt_lock);
858 
859 	iwk_stop(sc);
860 	DELAY(500000);
861 
862 	/*
863 	 * Unregister from the MAC layer subsystem
864 	 */
865 	(void) mac_unregister(sc->sc_ic.ic_mach);
866 
867 	mutex_enter(&sc->sc_glock);
868 	iwk_free_fw_dma(sc);
869 	iwk_ring_free(sc);
870 	iwk_free_kw(sc);
871 	iwk_free_shared(sc);
872 	mutex_exit(&sc->sc_glock);
873 
874 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
875 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
876 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
877 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
878 
879 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
880 	sc->sc_soft_hdl = NULL;
881 
882 	/*
883 	 * detach ieee80211
884 	 */
885 	ieee80211_detach(&sc->sc_ic);
886 
887 	iwk_destroy_locks(sc);
888 
889 	ddi_regs_map_free(&sc->sc_handle);
890 	ddi_regs_map_free(&sc->sc_cfg_handle);
891 	ddi_remove_minor_node(dip, NULL);
892 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
893 
894 	return (DDI_SUCCESS);
895 }
896 
897 static void
898 iwk_destroy_locks(iwk_sc_t *sc)
899 {
900 	cv_destroy(&sc->sc_mt_cv);
901 	mutex_destroy(&sc->sc_mt_lock);
902 	cv_destroy(&sc->sc_tx_cv);
903 	cv_destroy(&sc->sc_cmd_cv);
904 	cv_destroy(&sc->sc_fw_cv);
905 	mutex_destroy(&sc->sc_tx_lock);
906 	mutex_destroy(&sc->sc_glock);
907 }
908 
909 /*
910  * Allocate an area of memory and a DMA handle for accessing it
911  */
912 static int
913 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
914     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
915     uint_t dma_flags, iwk_dma_t *dma_p)
916 {
917 	caddr_t vaddr;
918 	int err;
919 
920 	/*
921 	 * Allocate handle
922 	 */
923 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
924 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
925 	if (err != DDI_SUCCESS) {
926 		dma_p->dma_hdl = NULL;
927 		return (DDI_FAILURE);
928 	}
929 
930 	/*
931 	 * Allocate memory
932 	 */
933 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
934 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
935 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
936 	if (err != DDI_SUCCESS) {
937 		ddi_dma_free_handle(&dma_p->dma_hdl);
938 		dma_p->dma_hdl = NULL;
939 		dma_p->acc_hdl = NULL;
940 		return (DDI_FAILURE);
941 	}
942 
943 	/*
944 	 * Bind the two together
945 	 */
946 	dma_p->mem_va = vaddr;
947 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
948 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
949 	    &dma_p->cookie, &dma_p->ncookies);
950 	if (err != DDI_DMA_MAPPED) {
951 		ddi_dma_mem_free(&dma_p->acc_hdl);
952 		ddi_dma_free_handle(&dma_p->dma_hdl);
953 		dma_p->acc_hdl = NULL;
954 		dma_p->dma_hdl = NULL;
955 		return (DDI_FAILURE);
956 	}
957 
958 	dma_p->nslots = ~0U;
959 	dma_p->size = ~0U;
960 	dma_p->token = ~0U;
961 	dma_p->offset = 0;
962 	return (DDI_SUCCESS);
963 }
964 
965 /*
966  * Free one allocated area of DMAable memory
967  */
968 static void
969 iwk_free_dma_mem(iwk_dma_t *dma_p)
970 {
971 	if (dma_p->dma_hdl != NULL) {
972 		if (dma_p->ncookies) {
973 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
974 			dma_p->ncookies = 0;
975 		}
976 		ddi_dma_free_handle(&dma_p->dma_hdl);
977 		dma_p->dma_hdl = NULL;
978 	}
979 
980 	if (dma_p->acc_hdl != NULL) {
981 		ddi_dma_mem_free(&dma_p->acc_hdl);
982 		dma_p->acc_hdl = NULL;
983 	}
984 }
985 
986 /*
987  * Allocate DMA memory for the firmware sections and copy the
 * firmware image into them.
988  */
989 static int
990 iwk_alloc_fw_dma(iwk_sc_t *sc)
991 {
992 	int err = DDI_SUCCESS;
993 	iwk_dma_t *dma_p;
994 	char *t;
995 
996 	/*
997 	 * firmware image layout:
998 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
999 	 */
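	/*
	 * Each section length comes from the firmware header (textsz,
	 * datasz, init_textsz, init_datasz, bootsz); the pointer 't'
	 * below walks the image section by section.
	 */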
1000 	t = (char *)(sc->sc_hdr + 1);
1001 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1002 	    &fw_dma_attr, &iwk_dma_accattr,
1003 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1004 	    &sc->sc_dma_fw_text);
1005 	dma_p = &sc->sc_dma_fw_text;
1006 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1007 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1008 	    dma_p->cookie.dmac_size));
1009 	if (err != DDI_SUCCESS) {
1010 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1011 		    " text dma memory");
1012 		goto fail;
1013 	}
1014 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1015 
1016 	t += LE_32(sc->sc_hdr->textsz);
1017 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1018 	    &fw_dma_attr, &iwk_dma_accattr,
1019 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1020 	    &sc->sc_dma_fw_data);
1021 	dma_p = &sc->sc_dma_fw_data;
1022 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1023 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1024 	    dma_p->cookie.dmac_size));
1025 	if (err != DDI_SUCCESS) {
1026 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1027 		    " data dma memory");
1028 		goto fail;
1029 	}
1030 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1031 
1032 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1033 	    &fw_dma_attr, &iwk_dma_accattr,
1034 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1035 	    &sc->sc_dma_fw_data_bak);
1036 	dma_p = &sc->sc_dma_fw_data_bak;
1037 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1038 	    "size:%lx]\n",
1039 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1040 	    dma_p->cookie.dmac_size));
1041 	if (err != DDI_SUCCESS) {
1042 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1043 		    " data backup dma memory");
1044 		goto fail;
1045 	}
1046 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1047 
1048 	t += LE_32(sc->sc_hdr->datasz);
1049 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1050 	    &fw_dma_attr, &iwk_dma_accattr,
1051 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1052 	    &sc->sc_dma_fw_init_text);
1053 	dma_p = &sc->sc_dma_fw_init_text;
1054 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1055 	    "size:%lx]\n",
1056 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1057 	    dma_p->cookie.dmac_size));
1058 	if (err != DDI_SUCCESS) {
1059 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1060 		    " init text dma memory");
1061 		goto fail;
1062 	}
1063 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1064 
1065 	t += LE_32(sc->sc_hdr->init_textsz);
1066 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1067 	    &fw_dma_attr, &iwk_dma_accattr,
1068 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1069 	    &sc->sc_dma_fw_init_data);
1070 	dma_p = &sc->sc_dma_fw_init_data;
1071 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1072 	    "size:%lx]\n",
1073 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1074 	    dma_p->cookie.dmac_size));
1075 	if (err != DDI_SUCCESS) {
1076 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1077 		    " init data dma memory");
1078 		goto fail;
1079 	}
1080 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1081 
1082 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1083 fail:
1084 	return (err);
1085 }
1086 
1087 static void
1088 iwk_free_fw_dma(iwk_sc_t *sc)
1089 {
1090 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1091 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1092 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1093 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1094 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1095 }
1096 
1097 /*
1098  * Allocate a shared page between host and NIC.
1099  */
1100 static int
1101 iwk_alloc_shared(iwk_sc_t *sc)
1102 {
1103 	iwk_dma_t *dma_p;
1104 	int err = DDI_SUCCESS;
1105 
1106 	/* must be aligned on a 4K-page boundary */
1107 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1108 	    &sh_dma_attr, &iwk_dma_accattr,
1109 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1110 	    &sc->sc_dma_sh);
1111 	if (err != DDI_SUCCESS)
1112 		goto fail;
1113 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1114 
1115 	dma_p = &sc->sc_dma_sh;
1116 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1117 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1118 	    dma_p->cookie.dmac_size));
1119 
1120 	return (err);
1121 fail:
1122 	iwk_free_shared(sc);
1123 	return (err);
1124 }
1125 
1126 static void
1127 iwk_free_shared(iwk_sc_t *sc)
1128 {
1129 	iwk_free_dma_mem(&sc->sc_dma_sh);
1130 }
1131 
1132 /*
1133  * Allocate a keep warm page.
1134  */
1135 static int
1136 iwk_alloc_kw(iwk_sc_t *sc)
1137 {
1138 	iwk_dma_t *dma_p;
1139 	int err = DDI_SUCCESS;
1140 
1141 	/* must be aligned on a 4K-page boundary */
1142 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1143 	    &kw_dma_attr, &iwk_dma_accattr,
1144 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1145 	    &sc->sc_dma_kw);
1146 	if (err != DDI_SUCCESS)
1147 		goto fail;
1148 
1149 	dma_p = &sc->sc_dma_kw;
1150 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1151 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1152 	    dma_p->cookie.dmac_size));
1153 
1154 	return (err);
1155 fail:
1156 	iwk_free_kw(sc);
1157 	return (err);
1158 }
1159 
1160 static void
1161 iwk_free_kw(iwk_sc_t *sc)
1162 {
1163 	iwk_free_dma_mem(&sc->sc_dma_kw);
1164 }
1165 
1166 static int
1167 iwk_alloc_rx_ring(iwk_sc_t *sc)
1168 {
1169 	iwk_rx_ring_t *ring;
1170 	iwk_rx_data_t *data;
1171 	iwk_dma_t *dma_p;
1172 	int i, err = DDI_SUCCESS;
1173 
1174 	ring = &sc->sc_rxq;
1175 	ring->cur = 0;
1176 
1177 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1178 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1179 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1180 	    &ring->dma_desc);
1181 	if (err != DDI_SUCCESS) {
1182 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring desc "
1183 		    "failed\n"));
1184 		goto fail;
1185 	}
1186 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1187 	dma_p = &ring->dma_desc;
1188 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1189 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1190 	    dma_p->cookie.dmac_size));
1191 
1192 	/*
1193 	 * Allocate Rx buffers.
1194 	 */
1195 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1196 		data = &ring->data[i];
1197 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1198 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1199 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1200 		    &data->dma_data);
1201 		if (err != DDI_SUCCESS) {
1202 			IWK_DBG((IWK_DEBUG_DMA, "dma alloc rx ring "
1203 			    "buf[%d] failed\n", i));
1204 			goto fail;
1205 		}
1206 		/*
1207 		 * physical address bits [8:36] are used here,
1208 		 * instead of bits [0:31] as on the 3945.
1209 		 */
1210 		ring->desc[i] = LE_32((uint32_t)
1211 		    (data->dma_data.cookie.dmac_address >> 8));
1212 	}
1213 	dma_p = &ring->data[0].dma_data;
1214 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1215 	    "size:%lx]\n",
1216 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1217 	    dma_p->cookie.dmac_size));
1218 
1219 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1220 
1221 	return (err);
1222 
1223 fail:
1224 	iwk_free_rx_ring(sc);
1225 	return (err);
1226 }
1227 
1228 static void
1229 iwk_reset_rx_ring(iwk_sc_t *sc)
1230 {
1231 	int n;
1232 
1233 	iwk_mac_access_enter(sc);
1234 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1235 	for (n = 0; n < 2000; n++) {
1236 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1237 			break;
1238 		DELAY(1000);
1239 	}
1240 #ifdef DEBUG
1241 	if (n == 2000)
1242 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1243 #endif
1244 	iwk_mac_access_exit(sc);
1245 
1246 	sc->sc_rxq.cur = 0;
1247 }
1248 
1249 static void
1250 iwk_free_rx_ring(iwk_sc_t *sc)
1251 {
1252 	int i;
1253 
1254 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1255 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1256 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1257 			    DDI_DMA_SYNC_FORCPU);
1258 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1259 	}
1260 
1261 	if (sc->sc_rxq.dma_desc.dma_hdl)
1262 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1263 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1264 }
1265 
1266 static int
1267 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1268     int slots, int qid)
1269 {
1270 	iwk_tx_data_t *data;
1271 	iwk_tx_desc_t *desc_h;
1272 	uint32_t paddr_desc_h;
1273 	iwk_cmd_t *cmd_h;
1274 	uint32_t paddr_cmd_h;
1275 	iwk_dma_t *dma_p;
1276 	int i, err = DDI_SUCCESS;
1277 
1278 	ring->qid = qid;
1279 	ring->count = TFD_QUEUE_SIZE_MAX;
1280 	ring->window = slots;
1281 	ring->queued = 0;
1282 	ring->cur = 0;
1283 
1284 	err = iwk_alloc_dma_mem(sc,
1285 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1286 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1287 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1288 	    &ring->dma_desc);
1289 	if (err != DDI_SUCCESS) {
1290 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring desc[%d]"
1291 		    " failed\n", qid));
1292 		goto fail;
1293 	}
1294 	dma_p = &ring->dma_desc;
1295 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1296 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1297 	    dma_p->cookie.dmac_size));
1298 
1299 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1300 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1301 
1302 	err = iwk_alloc_dma_mem(sc,
1303 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1304 	    &cmd_dma_attr, &iwk_dma_accattr,
1305 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1306 	    &ring->dma_cmd);
1307 	if (err != DDI_SUCCESS) {
1308 		IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx ring cmd[%d]"
1309 		    " failed\n", qid));
1310 		goto fail;
1311 	}
1312 	dma_p = &ring->dma_cmd;
1313 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1314 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1315 	    dma_p->cookie.dmac_size));
1316 
1317 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1318 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1319 
1320 	/*
1321 	 * Allocate Tx buffers.
1322 	 */
1323 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1324 	    KM_NOSLEEP);
1325 	if (ring->data == NULL) {
1326 		IWK_DBG((IWK_DEBUG_DMA, "could not allocate "
1327 		    "tx data slots\n"));
1328 		goto fail;
1329 	}
1330 
1331 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1332 		data = &ring->data[i];
1333 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1334 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1335 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1336 		    &data->dma_data);
1337 		if (err != DDI_SUCCESS) {
1338 			IWK_DBG((IWK_DEBUG_DMA, "dma alloc tx "
1339 			    "ring buf[%d] failed\n", i));
1340 			goto fail;
1341 		}
1342 
1343 		data->desc = desc_h + i;
1344 		data->paddr_desc = paddr_desc_h +
1345 		    _PTRDIFF(data->desc, desc_h);
1346 		data->cmd = cmd_h +  i; /* (i % slots); */
1347 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1348 		data->paddr_cmd = paddr_cmd_h +
1349 		    _PTRDIFF(data->cmd, cmd_h);
1350 	}
1351 	dma_p = &ring->data[0].dma_data;
1352 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1353 	    "size:%lx]\n",
1354 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1355 	    dma_p->cookie.dmac_size));
1356 
1357 	return (err);
1358 
1359 fail:
1360 	if (ring->data)
1361 		kmem_free(ring->data,
1362 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1363 	iwk_free_tx_ring(sc, ring);
1364 	return (err);
1365 }
1366 
1367 static void
1368 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1369 {
1370 	iwk_tx_data_t *data;
1371 	int i, n;
1372 
1373 	iwk_mac_access_enter(sc);
1374 
1375 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1376 	for (n = 0; n < 200; n++) {
1377 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1378 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1379 			break;
1380 		DELAY(10);
1381 	}
1382 #ifdef DEBUG
1383 	if (n == 200 && iwk_dbg_flags > 0) {
1384 		IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1385 		    ring->qid));
1386 	}
1387 #endif
1388 	iwk_mac_access_exit(sc);
1389 
1390 	for (i = 0; i < ring->count; i++) {
1391 		data = &ring->data[i];
1392 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1393 	}
1394 
1395 	ring->queued = 0;
1396 	ring->cur = 0;
1397 }
1398 
1399 /*ARGSUSED*/
1400 static void
1401 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1402 {
1403 	int i;
1404 
1405 	if (ring->dma_desc.dma_hdl != NULL)
1406 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1407 	iwk_free_dma_mem(&ring->dma_desc);
1408 
1409 	if (ring->dma_cmd.dma_hdl != NULL)
1410 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1411 	iwk_free_dma_mem(&ring->dma_cmd);
1412 
1413 	if (ring->data != NULL) {
1414 		for (i = 0; i < ring->count; i++) {
1415 			if (ring->data[i].dma_data.dma_hdl)
1416 				IWK_DMA_SYNC(ring->data[i].dma_data,
1417 				    DDI_DMA_SYNC_FORDEV);
1418 			iwk_free_dma_mem(&ring->data[i].dma_data);
1419 		}
1420 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1421 	}
1422 }
1423 
1424 static int
1425 iwk_ring_init(iwk_sc_t *sc)
1426 {
1427 	int i, err = DDI_SUCCESS;
1428 
1429 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1430 		if (i == IWK_CMD_QUEUE_NUM)
1431 			continue;
1432 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1433 		    i);
1434 		if (err != DDI_SUCCESS)
1435 			goto fail;
1436 	}
1437 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1438 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1439 	if (err != DDI_SUCCESS)
1440 		goto fail;
1441 	err = iwk_alloc_rx_ring(sc);
1442 	if (err != DDI_SUCCESS)
1443 		goto fail;
1444 	return (err);
1445 
1446 fail:
1447 	return (err);
1448 }
1449 
1450 static void
1451 iwk_ring_free(iwk_sc_t *sc)
1452 {
1453 	int i = IWK_NUM_QUEUES;
1454 
1455 	iwk_free_rx_ring(sc);
1456 	while (--i >= 0) {
1457 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1458 	}
1459 }
1460 
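/*
 * An ieee80211_node_t is embedded in iwk_amrr_t, so allocating an
 * iwk_amrr_t here gives net80211 its node and gives the driver the
 * per-node rate control (AMRR) state; iwk_node_free() releases the
 * whole iwk_amrr_t.
 */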
1461 /* ARGSUSED */
1462 static ieee80211_node_t *
1463 iwk_node_alloc(ieee80211com_t *ic)
1464 {
1465 	iwk_amrr_t *amrr;
1466 
1467 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1468 	if (amrr != NULL)
1469 		iwk_amrr_init(amrr);
1470 	return (&amrr->in);
1471 }
1472 
1473 static void
1474 iwk_node_free(ieee80211_node_t *in)
1475 {
1476 	ieee80211com_t *ic = in->in_ic;
1477 
1478 	ic->ic_node_cleanup(in);
1479 	if (in->in_wpa_ie != NULL)
1480 		ieee80211_free(in->in_wpa_ie);
1481 	kmem_free(in, sizeof (iwk_amrr_t));
1482 }
1483 
1484 /*ARGSUSED*/
1485 static int
1486 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1487 {
1488 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1489 	ieee80211_node_t *in = ic->ic_bss;
1490 	enum ieee80211_state ostate = ic->ic_state;
1491 	int i, err = IWK_SUCCESS;
1492 
1493 	mutex_enter(&sc->sc_glock);
1494 	switch (nstate) {
1495 	case IEEE80211_S_SCAN:
1496 		ic->ic_state = nstate;
1497 		if (ostate == IEEE80211_S_INIT) {
1498 			ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
1499 			/* let LED blink when scanning */
1500 			iwk_set_led(sc, 2, 10, 2);
1501 
1502 			if ((err = iwk_scan(sc)) != 0) {
1503 				IWK_DBG((IWK_DEBUG_80211,
1504 				    "could not initiate scan\n"));
1505 				ic->ic_flags &= ~(IEEE80211_F_SCAN |
1506 				    IEEE80211_F_ASCAN);
1507 				ic->ic_state = ostate;
1508 				mutex_exit(&sc->sc_glock);
1509 				return (err);
1510 			}
1511 		}
1512 		sc->sc_clk = 0;
1513 		mutex_exit(&sc->sc_glock);
1514 		return (IWK_SUCCESS);
1515 
1516 	case IEEE80211_S_AUTH:
1517 		/* reset state to handle reassociations correctly */
1518 		sc->sc_config.assoc_id = 0;
1519 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1520 
1521 		/*
1522 		 * before sending authentication and association request frames,
1523 		 * we need to do some hardware setup, such as tuning to the
1524 		 * same channel as the target AP...
1525 		 */
1526 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1527 			IWK_DBG((IWK_DEBUG_80211,
1528 			    "could not send authentication request\n"));
1529 			mutex_exit(&sc->sc_glock);
1530 			return (err);
1531 		}
1532 		break;
1533 
1534 	case IEEE80211_S_RUN:
1535 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1536 			/* let LED blink when monitoring */
1537 			iwk_set_led(sc, 2, 10, 10);
1538 			break;
1539 		}
1540 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1541 
1542 		/* non-IBSS mode */
1543 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1544 			/* update adapter's configuration */
1545 			sc->sc_config.assoc_id = sc->sc_assoc_id & 0x3fff;
1546 			/*
1547 			 * short preamble/slot time are
1548 			 * negotiated when associating
1549 			 */
1550 			sc->sc_config.flags &=
1551 			    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
1552 			    RXON_FLG_SHORT_SLOT_MSK);
1553 
1554 			if (ic->ic_flags & IEEE80211_F_SHSLOT)
1555 				sc->sc_config.flags |=
1556 				    LE_32(RXON_FLG_SHORT_SLOT_MSK);
1557 
1558 			if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
1559 				sc->sc_config.flags |=
1560 				    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
1561 
1562 			sc->sc_config.filter_flags |=
1563 			    LE_32(RXON_FILTER_ASSOC_MSK);
1564 
1565 			if (ic->ic_opmode != IEEE80211_M_STA)
1566 				sc->sc_config.filter_flags |=
1567 				    LE_32(RXON_FILTER_BCON_AWARE_MSK);
1568 
1569 			IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
1570 			    " filter_flags %x\n",
1571 			    sc->sc_config.chan, sc->sc_config.flags,
1572 			    sc->sc_config.filter_flags));
1573 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1574 			    sizeof (iwk_rxon_cmd_t), 1);
1575 			if (err != IWK_SUCCESS) {
1576 				IWK_DBG((IWK_DEBUG_80211,
1577 				    "could not update configuration\n"));
1578 				mutex_exit(&sc->sc_glock);
1579 				return (err);
1580 			}
1581 		}
1582 
1583 		/* obtain current temperature of chipset */
1584 		sc->sc_tempera = iwk_curr_tempera(sc);
1585 
1586 		/*
1587 		 * perform Tx power calibration to determine
1588 		 * the DSP and radio gains
1589 		 */
1590 		err = iwk_tx_power_calibration(sc);
1591 		if (err) {
1592 			cmn_err(CE_WARN, "iwk_newstate(): "
1593 			    "failed to set tx power table\n");
1594 			return (err);
1595 		}
1596 
1597 		/* start automatic rate control */
1598 		mutex_enter(&sc->sc_mt_lock);
1599 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1600 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1601 			/* set rate to some reasonable initial value */
1602 			i = in->in_rates.ir_nrates - 1;
1603 			while (i > 0 && IEEE80211_RATE(i) > 72)
1604 				i--;
1605 			in->in_txrate = i;
1606 		} else {
1607 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1608 		}
1609 		mutex_exit(&sc->sc_mt_lock);
1610 
1611 		/* set LED on after associated */
1612 		iwk_set_led(sc, 2, 0, 1);
1613 		break;
1614 
1615 	case IEEE80211_S_INIT:
1616 		/* set LED off after init */
1617 		iwk_set_led(sc, 2, 1, 0);
1618 		break;
1619 	case IEEE80211_S_ASSOC:
1620 		break;
1621 	}
1622 
1623 	mutex_exit(&sc->sc_glock);
1624 
1625 	err = sc->sc_newstate(ic, nstate, arg);
1626 
1627 	if (nstate == IEEE80211_S_RUN) {
1628 
1629 		mutex_enter(&sc->sc_glock);
1630 
1631 		/*
1632 		 * initialize the receiver
1633 		 * sensitivity calibration
1634 		 */
1635 		err = iwk_rx_sens_init(sc);
1636 		if (err) {
1637 			cmn_err(CE_WARN, "iwk_newstate(): "
1638 			    "failed to init RX sensitivity\n");
1639 			return (err);
1640 		}
1641 
1642 		/* initialize receiver gain balance */
1643 		err = iwk_rxgain_diff_init(sc);
1644 		if (err) {
1645 			cmn_err(CE_WARN, "iwk_newstate(): "
1646 			    "failed to init phy calibration\n");
1647 			return (err);
1648 		}
1649 
1650 		mutex_exit(&sc->sc_glock);
1651 
1652 	}
1653 
1654 	return (err);
1655 }
1656 
1657 /*ARGSUSED*/
1658 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1659     const uint8_t mac[IEEE80211_ADDR_LEN])
1660 {
1661 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1662 	iwk_add_sta_t node;
1663 	int err;
1664 
1665 	switch (k->wk_cipher->ic_cipher) {
1666 	case IEEE80211_CIPHER_WEP:
1667 	case IEEE80211_CIPHER_TKIP:
1668 		return (1); /* software handles it. */
1669 	case IEEE80211_CIPHER_AES_CCM:
1670 		break;
1671 	default:
1672 		return (0);
1673 	}
1674 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1675 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1676 
1677 	mutex_enter(&sc->sc_glock);
1678 
1679 	/* update ap/multicast node */
1680 	(void) memset(&node, 0, sizeof (node));
1681 	if (IEEE80211_IS_MULTICAST(mac)) {
1682 		(void) memset(node.bssid, 0xff, 6);
1683 		node.id = IWK_BROADCAST_ID;
1684 	} else {
1685 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1686 		node.id = IWK_AP_ID;
1687 	}
1688 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1689 		node.key_flags = 0;
1690 		node.keyp = k->wk_keyix;
1691 	} else {
1692 		node.key_flags = (1 << 14);
1693 		node.keyp = k->wk_keyix + 4;
1694 	}
1695 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1696 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1697 	node.sta_mask = STA_MODIFY_KEY_MASK;
1698 	node.control = 1;
1699 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1700 	if (err != IWK_SUCCESS) {
1701 		cmn_err(CE_WARN, "iwk_key_set():"
1702 		    "failed to update ap node\n");
1703 		mutex_exit(&sc->sc_glock);
1704 		return (0);
1705 	}
1706 	mutex_exit(&sc->sc_glock);
1707 	return (1);
1708 }
1709 
1710 /*
1711  * begin exclusive access to the MAC.
1712  */
1713 static void
1714 iwk_mac_access_enter(iwk_sc_t *sc)
1715 {
1716 	uint32_t tmp;
1717 	int n;
1718 
1719 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1720 	IWK_WRITE(sc, CSR_GP_CNTRL,
1721 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1722 
1723 	/* wait until we succeed */
1724 	for (n = 0; n < 1000; n++) {
1725 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1726 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1727 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1728 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1729 			break;
1730 		DELAY(10);
1731 	}
1732 	if (n == 1000)
1733 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1734 }
1735 
1736 /*
1737  * exclusive access to mac end.
1738  */
1739 static void
1740 iwk_mac_access_exit(iwk_sc_t *sc)
1741 {
1742 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1743 	IWK_WRITE(sc, CSR_GP_CNTRL,
1744 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1745 }
1746 
1747 static uint32_t
1748 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1749 {
1750 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1751 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1752 }
1753 
1754 static void
1755 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1756 {
1757 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1758 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1759 }
1760 
1761 static uint32_t
1762 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1763 {
1764 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1765 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1766 }
1767 
1768 static void
1769 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1770 {
1771 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1772 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1773 }
1774 
1775 static void
1776 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1777     uint32_t *data, int wlen)
1778 {
1779 	for (; wlen > 0; wlen--, data++, addr += 4)
1780 		iwk_reg_write(sc, addr, *data);
1781 }
1782 
1783 
1784 /*
1785  * ucode load/initialization steps:
1786  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1787  * BSM contains a small memory that *always* stays powered up, so it can
1788  * retain the bootstrap program even when the card is in a power-saving
1789  * power-down state.  The BSM loads the small program into ARC processor's
1790  * instruction memory when triggered by power-up.
1791  * 2)  load Initialize image via bootstrap program.
1792  * The Initialize image sets up regulatory and calibration data for the
1793  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1794  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1795  * correction.
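 * 3)  The runtime/protocol uCode itself is not loaded by this routine;
 * its DRAM addresses are programmed later, when the "initialize alive"
 * notification is handled.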
1796  */
1797 static int
1798 iwk_load_firmware(iwk_sc_t *sc)
1799 {
1800 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1801 	uint32_t size = sc->sc_hdr->bootsz;
1802 	int n, err = IWK_SUCCESS;
1803 
1804 	/*
1805 	 * Program the BSM with physical address bits [4:35] of the initialize
1806 	 * uCode.  When the initialize alive notification is handled, the
1807 	 * physical address of the runtime uCode will be set for loading.
1808 	 */
1809 	iwk_mac_access_enter(sc);
1810 
1811 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1812 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1813 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1814 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1815 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1816 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1817 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1818 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1819 
1820 	/* load bootstrap code into BSM memory */
1821 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1822 	    size / sizeof (uint32_t));
1823 
1824 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1825 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1826 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1827 
1828 	/*
1829 	 * prepare to load initialize uCode
1830 	 */
1831 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
1832 
1833 	/* wait while the adapter is busy loading the firmware */
1834 	for (n = 0; n < 1000; n++) {
1835 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
1836 		    BSM_WR_CTRL_REG_BIT_START))
1837 			break;
1838 		DELAY(10);
1839 	}
1840 	if (n == 1000) {
1841 		IWK_DBG((IWK_DEBUG_FW,
1842 		    "timeout transferring firmware\n"));
1843 		err = ETIMEDOUT;
1844 		return (err);
1845 	}
1846 
1847 	/* for future power-save mode use */
1848 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
1849 
1850 	iwk_mac_access_exit(sc);
1851 
1852 	return (err);
1853 }
1854 
1855 /*ARGSUSED*/
1856 static void
1857 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
1858 {
1859 	ieee80211com_t *ic = &sc->sc_ic;
1860 	iwk_rx_ring_t *ring = &sc->sc_rxq;
1861 	iwk_rx_phy_res_t *stat;
1862 	ieee80211_node_t *in;
1863 	uint32_t *tail;
1864 	struct ieee80211_frame *wh;
1865 	mblk_t *mp;
1866 	uint16_t len, rssi, mrssi, agc;
1867 	int16_t t;
1868 	uint32_t ants, i;
1869 	struct iwk_rx_non_cfg_phy *phyinfo;
1870 
1871 	/* assume non-11n here; 11n will be handled in phase II */
1872 	stat = (iwk_rx_phy_res_t *)(desc + 1);
1873 	if (stat->cfg_phy_cnt > 20) {
1874 		return;
1875 	}
1876 
1877 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
1878 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
1879 	mrssi = 0;
1880 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
1881 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
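	/*
	 * take the maximum reported RSSI over the antennae that the PHY
	 * flags mark as having received this frame
	 */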
1882 	for (i = 0; i < 3; i++) {
1883 		if (ants & (1 << i))
1884 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
1885 	}
1886 	t = mrssi - agc - 44; /* t is the signal level in dBm */
1887 	/*
1888 	 * convert dBm to a percentage (-20 dBm maps to 100%, weaker signals
1889 	 * fall off quadratically; the result is clamped to [1, 100] below)
1890 	 */
1890 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
1891 	    (75 * 75);
1892 	if (rssi > 100)
1893 		rssi = 100;
1894 	if (rssi < 1)
1895 		rssi = 1;
1896 	len = stat->byte_count;
1897 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
1898 
1899 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
1900 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
1901 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
1902 	    len, stat->rate.r.s.rate, stat->channel,
1903 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
1904 	    stat->cfg_phy_cnt, LE_32(*tail)));
1905 
1906 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
1907 		IWK_DBG((IWK_DEBUG_RX, "rx frame with invalid length\n"));
1908 		return;
1909 	}
1910 
1911 	/*
1912 	 * discard Rx frames with bad CRC
1913 	 */
1914 	if ((LE_32(*tail) &
1915 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
1916 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1917 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
1918 		    LE_32(*tail)));
1919 		sc->sc_rx_err++;
1920 		return;
1921 	}
1922 
1923 	wh = (struct ieee80211_frame *)
1924 	    ((uint8_t *)(stat + 1) + stat->cfg_phy_cnt);
1925 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
1926 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
1927 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
1928 		    sc->sc_assoc_id));
1929 	}
1930 #ifdef DEBUG
1931 	if (iwk_dbg_flags & IWK_DEBUG_RX)
1932 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
1933 #endif
1934 	in = ieee80211_find_rxnode(ic, wh);
1935 	mp = allocb(len, BPRI_MED);
1936 	if (mp) {
1937 		(void) memcpy(mp->b_wptr, wh, len);
1938 		mp->b_wptr += len;
1939 
1940 		/* send the frame to the 802.11 layer */
1941 		(void) ieee80211_input(ic, mp, in, rssi, 0);
1942 	} else {
1943 		sc->sc_rx_nobuf++;
1944 		IWK_DBG((IWK_DEBUG_RX,
1945 		    "iwk_rx_intr(): alloc rx buf failed\n"));
1946 	}
1947 	/* release node reference */
1948 	ieee80211_free_node(in);
1949 }
1950 
1951 /*ARGSUSED*/
1952 static void
1953 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
1954 {
1955 	ieee80211com_t *ic = &sc->sc_ic;
1956 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
1957 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
1958 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
1959 
1960 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
1961 	    " retries=%d frame_count=%x nkill=%d "
1962 	    "rate=%x duration=%d status=%x\n",
1963 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
1964 	    stat->bt_kill_count, stat->rate.r.s.rate,
1965 	    LE_32(stat->duration), LE_32(stat->status)));
1966 
1967 	amrr->txcnt++;
1968 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
1969 	if (stat->ntries > 0) {
1970 		amrr->retrycnt++;
1971 		sc->sc_tx_retries++;
1972 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
1973 		    sc->sc_tx_retries));
1974 	}
1975 
1976 	sc->sc_tx_timer = 0;
1977 
1978 	mutex_enter(&sc->sc_tx_lock);
1979 	ring->queued--;
1980 	if (ring->queued < 0)
1981 		ring->queued = 0;
1982 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
1983 		sc->sc_need_reschedule = 0;
1984 		mutex_exit(&sc->sc_tx_lock);
1985 		mac_tx_update(ic->ic_mach);
1986 		mutex_enter(&sc->sc_tx_lock);
1987 	}
1988 	mutex_exit(&sc->sc_tx_lock);
1989 }
1990 
1991 static void
1992 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
1993 {
1994 	if ((desc->hdr.qid & 7) != 4) {
1995 		return;
1996 	}
1997 	mutex_enter(&sc->sc_glock);
1998 	sc->sc_flags |= IWK_F_CMD_DONE;
1999 	cv_signal(&sc->sc_cmd_cv);
2000 	mutex_exit(&sc->sc_glock);
2001 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2002 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2003 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2004 	    desc->hdr.type));
2005 }
2006 
2007 static void
2008 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2009 {
2010 	uint32_t base, i;
2011 	struct iwk_alive_resp *ar =
2012 	    (struct iwk_alive_resp *)(desc + 1);
2013 
2014 	/* the microcontroller is ready */
2015 	IWK_DBG((IWK_DEBUG_FW,
2016 	    "microcode alive notification minor: %x major: %x type:"
2017 	    " %x subtype: %x\n",
2018 	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2019 
2020 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2021 		IWK_DBG((IWK_DEBUG_FW,
2022 		    "microcontroller initialization failed\n"));
2023 	}
2024 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2025 		IWK_DBG((IWK_DEBUG_FW,
2026 		    "initialization alive received.\n"));
2027 		(void) memcpy(&sc->sc_card_alive_init, ar,
2028 		    sizeof (struct iwk_init_alive_resp));
2029 		/* XXX get temperature */
2030 		iwk_mac_access_enter(sc);
2031 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2032 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2033 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2034 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2035 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2036 		    sc->sc_dma_fw_data.cookie.dmac_size);
2037 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2038 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2039 		iwk_mac_access_exit(sc);
2040 	} else {
2041 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2042 		(void) memcpy(&sc->sc_card_alive_run, ar,
2043 		    sizeof (struct iwk_alive_resp));
2044 
2045 		/*
2046 		 * Init SCD related registers to make Tx work. XXX
2047 		 */
2048 		iwk_mac_access_enter(sc);
2049 
2050 		/* read sram address of data base */
2051 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2052 
2053 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2054 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2055 		    i < 128; i += 4)
2056 			iwk_mem_write(sc, base + i, 0);
2057 
2058 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2059 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2060 		    i < 256; i += 4)
2061 			iwk_mem_write(sc, base + i, 0);
2062 
2063 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2064 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2065 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2066 			iwk_mem_write(sc, base + i, 0);
2067 
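		/*
		 * point the scheduler at the shared area in host DRAM that
		 * holds the per-queue byte count tables (the address is
		 * programmed right-shifted by 10 bits)
		 */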
2068 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2069 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2070 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2071 
2072 		/* initiate the tx queues */
2073 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
2074 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2075 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2076 			iwk_mem_write(sc, sc->sc_scd_base +
2077 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2078 			    (SCD_WIN_SIZE & 0x7f));
2079 			iwk_mem_write(sc, sc->sc_scd_base +
2080 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2081 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2082 		}
2083 		/* enable interrupts on each queue 0-7 */
2084 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2085 		    (1 << IWK_NUM_QUEUES) - 1);
2086 		/* enable each channel 0-7 */
2087 		iwk_reg_write(sc, SCD_TXFACT,
2088 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2089 		/*
2090 		 * queue 0-7 maps to FIFO 0-7 and
2091 		 * all queues work under FIFO mode (non-scheduler-ack)
2092 		 */
2093 		for (i = 0; i < 7; i++) {
2094 			iwk_reg_write(sc,
2095 			    SCD_QUEUE_STATUS_BITS(i),
2096 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2097 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2098 			    SCD_QUEUE_STTS_REG_MSK);
2099 		}
2100 		iwk_mac_access_exit(sc);
2101 
2102 		sc->sc_flags |= IWK_F_FW_INIT;
2103 		cv_signal(&sc->sc_fw_cv);
2104 	}
2105 
2106 }
2107 
2108 static uint_t
2109 /* LINTED: argument unused in function: unused */
2110 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2111 {
2112 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2113 	ieee80211com_t *ic = &sc->sc_ic;
2114 	iwk_rx_desc_t *desc;
2115 	iwk_rx_data_t *data;
2116 	uint32_t index;
2117 
2118 	mutex_enter(&sc->sc_glock);
2119 	if (sc->sc_rx_softint_pending != 1) {
2120 		mutex_exit(&sc->sc_glock);
2121 		return (DDI_INTR_UNCLAIMED);
2122 	}
2123 	/* disable interrupts */
2124 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2125 	mutex_exit(&sc->sc_glock);
2126 
2127 	/*
2128 	 * the firmware has advanced the write index of the rx queue;
2129 	 * read it and process every entry up to that point.
2130 	 */
2131 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2132 
2133 	while (sc->sc_rxq.cur != index) {
2134 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2135 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2136 
2137 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2138 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2139 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2140 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2141 
2142 		/* a reply that is not an rx/tx notification completes a pending command */
2143 		if (!(desc->hdr.qid & 0x80) &&
2144 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2145 		    (desc->hdr.type != REPLY_TX) &&
2146 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2147 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2148 		    (desc->hdr.type != SENSITIVITY_CMD))
2149 			iwk_cmd_intr(sc, desc);
2150 
2151 		switch (desc->hdr.type) {
2152 		case REPLY_4965_RX:
2153 			iwk_rx_intr(sc, desc, data);
2154 			break;
2155 
2156 		case REPLY_TX:
2157 			iwk_tx_intr(sc, desc, data);
2158 			break;
2159 
2160 		case REPLY_ALIVE:
2161 			iwk_ucode_alive(sc, desc);
2162 			break;
2163 
2164 		case CARD_STATE_NOTIFICATION:
2165 		{
2166 			uint32_t *status = (uint32_t *)(desc + 1);
2167 
2168 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2169 			    LE_32(*status)));
2170 
2171 			if (LE_32(*status) & 1) {
2172 				/*
2173 				 * the radio (RF kill) switch was toggled
2174 				 * OFF.  this is treated as a hw error;
2175 				 * iwk_thread() will recover once the
2176 				 * switch is toggled back ON
2177 				 */
2178 				cmn_err(CE_NOTE,
2179 				    "iwk_rx_softintr(): "
2180 				    "Radio transmitter is off\n");
2181 				sc->sc_ostate = sc->sc_ic.ic_state;
2182 				ieee80211_new_state(&sc->sc_ic,
2183 				    IEEE80211_S_INIT, -1);
2184 				sc->sc_flags |=
2185 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2186 			}
2187 			break;
2188 		}
2189 		case SCAN_START_NOTIFICATION:
2190 		{
2191 			iwk_start_scan_t *scan =
2192 			    (iwk_start_scan_t *)(desc + 1);
2193 
2194 			IWK_DBG((IWK_DEBUG_SCAN,
2195 			    "scanning channel %d status %x\n",
2196 			    scan->chan, LE_32(scan->status)));
2197 
2198 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2199 			break;
2200 		}
2201 		case SCAN_COMPLETE_NOTIFICATION:
2202 			IWK_DBG((IWK_DEBUG_SCAN, "scan finished\n"));
2203 			sc->sc_flags &= ~IWK_F_SCANNING;
2204 			ieee80211_end_scan(ic);
2205 			break;
2206 		case STATISTICS_NOTIFICATION:
2207 		{
2208 			/* handle statistics notification */
2209 			iwk_statistics_notify(sc, desc);
2210 			break;
2211 		}
2212 
2213 		}
2214 
2215 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2216 	}
2217 
2218 	/*
2219 	 * the driver has dealt with everything received in the rx queue;
2220 	 * tell the firmware where the read pointer now stands.
2221 	 */
2222 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
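	/* the write pointer handed back to the firmware is kept 8-aligned (hence the ~7 mask) */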
2223 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2224 
2225 	mutex_enter(&sc->sc_glock);
2226 	/* re-enable interrupts */
2227 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2228 	sc->sc_rx_softint_pending = 0;
2229 	mutex_exit(&sc->sc_glock);
2230 
2231 	return (DDI_INTR_CLAIMED);
2232 }
2233 
2234 static uint_t
2235 /* LINTED: argument unused in function: unused */
2236 iwk_intr(caddr_t arg, caddr_t unused)
2237 {
2238 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2239 	uint32_t r, rfh;
2240 
2241 	mutex_enter(&sc->sc_glock);
2242 
2243 	if (sc->sc_flags & IWK_F_SUSPEND) {
2244 		mutex_exit(&sc->sc_glock);
2245 		return (DDI_INTR_UNCLAIMED);
2246 	}
2247 
2248 	r = IWK_READ(sc, CSR_INT);
2249 	if (r == 0 || r == 0xffffffff) {
2250 		mutex_exit(&sc->sc_glock);
2251 		return (DDI_INTR_UNCLAIMED);
2252 	}
2253 
2254 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2255 
2256 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2257 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2258 	/* disable interrupts */
2259 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2260 	/* ack interrupts */
2261 	IWK_WRITE(sc, CSR_INT, r);
2262 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2263 
2264 	if (sc->sc_soft_hdl == NULL) {
2265 		mutex_exit(&sc->sc_glock);
2266 		return (DDI_INTR_CLAIMED);
2267 	}
2268 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2269 		IWK_DBG((IWK_DEBUG_FW, "fatal firmware error\n"));
2270 		mutex_exit(&sc->sc_glock);
2271 #ifdef DEBUG
2272 		/* dump event and error logs to dmesg */
2273 		iwk_write_error_log(sc);
2274 		iwk_write_event_log(sc);
2275 #endif /* DEBUG */
2276 		iwk_stop(sc);
2277 		sc->sc_ostate = sc->sc_ic.ic_state;
2278 		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2279 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2280 		return (DDI_INTR_CLAIMED);
2281 	}
2282 
2283 	if (r & BIT_INT_RF_KILL) {
2284 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2285 	}
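	/* RF kill is only logged here; iwk_thread() polls the switch state and drives recovery */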
2286 
2287 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2288 	    (rfh & FH_INT_RX_MASK)) {
2289 		sc->sc_rx_softint_pending = 1;
2290 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2291 	}
2292 
2293 	if (r & BIT_INT_ALIVE)	{
2294 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2295 	}
2296 
2297 	/* re-enable interrupts */
2298 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2299 	mutex_exit(&sc->sc_glock);
2300 
2301 	return (DDI_INTR_CLAIMED);
2302 }
2303 
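/*
 * Map an 802.11 rate given in 500 kb/s units (2 = 1 Mb/s ... 108 = 54 Mb/s)
 * to the rate code the 4965 firmware expects in rate_n_flags.  Callers OR in
 * the RATE_MCS_* flag bits as needed; e.g. the scan path below builds:
 *
 *	rate_n_flags = iwk_rate_to_plcp(2) | RATE_MCS_CCK_MSK | RATE_MCS_ANT_B_MSK;
 */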
2304 static uint8_t
2305 iwk_rate_to_plcp(int rate)
2306 {
2307 	uint8_t ret;
2308 
2309 	switch (rate) {
2310 	/* CCK rates */
2311 	case 2:
2312 		ret = 0xa;
2313 		break;
2314 	case 4:
2315 		ret = 0x14;
2316 		break;
2317 	case 11:
2318 		ret = 0x37;
2319 		break;
2320 	case 22:
2321 		ret = 0x6e;
2322 		break;
2323 	/* OFDM rates */
2324 	case 12:
2325 		ret = 0xd;
2326 		break;
2327 	case 18:
2328 		ret = 0xf;
2329 		break;
2330 	case 24:
2331 		ret = 0x5;
2332 		break;
2333 	case 36:
2334 		ret = 0x7;
2335 		break;
2336 	case 48:
2337 		ret = 0x9;
2338 		break;
2339 	case 72:
2340 		ret = 0xb;
2341 		break;
2342 	case 96:
2343 		ret = 0x1;
2344 		break;
2345 	case 108:
2346 		ret = 0x3;
2347 		break;
2348 	default:
2349 		ret = 0;
2350 		break;
2351 	}
2352 	return (ret);
2353 }
2354 
2355 static mblk_t *
2356 iwk_m_tx(void *arg, mblk_t *mp)
2357 {
2358 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2359 	ieee80211com_t	*ic = &sc->sc_ic;
2360 	mblk_t			*next;
2361 
2362 	if (sc->sc_flags & IWK_F_SUSPEND) {
2363 		freemsgchain(mp);
2364 		return (NULL);
2365 	}
2366 
2367 	if (ic->ic_state != IEEE80211_S_RUN) {
2368 		freemsgchain(mp);
2369 		return (NULL);
2370 	}
2371 
2372 	while (mp != NULL) {
2373 		next = mp->b_next;
2374 		mp->b_next = NULL;
2375 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2376 			mp->b_next = next;
2377 			break;
2378 		}
2379 		mp = next;
2380 	}
2381 	return (mp);
2382 }
2383 
2384 /* ARGSUSED */
2385 static int
2386 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2387 {
2388 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2389 	iwk_tx_ring_t *ring;
2390 	iwk_tx_desc_t *desc;
2391 	iwk_tx_data_t *data;
2392 	iwk_cmd_t *cmd;
2393 	iwk_tx_cmd_t *tx;
2394 	ieee80211_node_t *in;
2395 	struct ieee80211_frame *wh;
2396 	struct ieee80211_key *k = NULL;
2397 	mblk_t *m, *m0;
2398 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2399 	uint16_t masks = 0;
2400 
2401 	ring = &sc->sc_txq[0];
2402 	data = &ring->data[ring->cur];
2403 	desc = data->desc;
2404 	cmd = data->cmd;
2405 	bzero(desc, sizeof (*desc));
2406 	bzero(cmd, sizeof (*cmd));
2407 
2408 	mutex_enter(&sc->sc_tx_lock);
2409 	if (sc->sc_flags & IWK_F_SUSPEND) {
2410 		mutex_exit(&sc->sc_tx_lock);
2411 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2412 		    IEEE80211_FC0_TYPE_DATA) {
2413 			freemsg(mp);
2414 		}
2415 		err = IWK_FAIL;
2416 		goto exit;
2417 	}
2418 
2419 	if (ring->queued > ring->count - 64) {
2420 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2421 		sc->sc_need_reschedule = 1;
2422 		mutex_exit(&sc->sc_tx_lock);
2423 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2424 		    IEEE80211_FC0_TYPE_DATA) {
2425 			freemsg(mp);
2426 		}
2427 		sc->sc_tx_nobuf++;
2428 		err = IWK_FAIL;
2429 		goto exit;
2430 	}
2431 	mutex_exit(&sc->sc_tx_lock);
2432 
2433 	hdrlen = sizeof (struct ieee80211_frame);
2434 
2435 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2436 	if (m == NULL) { /* cannot alloc buf, drop this packet */
2437 		cmn_err(CE_WARN,
2438 		    "iwk_send(): failed to allocate msgbuf\n");
2439 		freemsg(mp);
2440 		err = IWK_SUCCESS;
2441 		goto exit;
2442 	}
2443 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2444 		mblen = MBLKL(m0);
2445 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2446 		off += mblen;
2447 	}
2448 	m->b_wptr += off;
2449 	freemsg(mp);
2450 
2451 	wh = (struct ieee80211_frame *)m->b_rptr;
2452 
2453 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2454 	if (in == NULL) {
2455 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2456 		freemsg(m);
2457 		sc->sc_tx_err++;
2458 		err = IWK_SUCCESS;
2459 		goto exit;
2460 	}
2461 	(void) ieee80211_encap(ic, m, in);
2462 
2463 	cmd->hdr.type = REPLY_TX;
2464 	cmd->hdr.flags = 0;
2465 	cmd->hdr.qid = ring->qid;
2466 	cmd->hdr.idx = ring->cur;
2467 
2468 	tx = (iwk_tx_cmd_t *)cmd->data;
2469 	tx->tx_flags = 0;
2470 
2471 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2472 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2473 	} else {
2474 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2475 	}
2476 
2477 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2478 		k = ieee80211_crypto_encap(ic, m);
2479 		if (k == NULL) {
2480 			freemsg(m);
2481 			sc->sc_tx_err++;
2482 			err = IWK_SUCCESS;
2483 			goto exit;
2484 		}
2485 
2486 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2487 			tx->sec_ctl = 2; /* for CCMP */
2488 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2489 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2490 		}
2491 
2492 		/* packet header may have moved, reset our local pointer */
2493 		wh = (struct ieee80211_frame *)m->b_rptr;
2494 	}
2495 
2496 	len = msgdsize(m);
2497 
2498 #ifdef DEBUG
2499 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2500 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2501 #endif
2502 
2503 	/* pickup a rate */
2504 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2505 	    IEEE80211_FC0_TYPE_MGT) {
2506 		/* mgmt frames are sent at 1M */
2507 		rate = in->in_rates.ir_rates[0];
2508 	} else {
2509 		/*
2510 		 * software rate control is done here for now;
2511 		 * hardware rate scaling may be used later.
2512 		 * maybe like the following, for management frame:
2513 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2514 		 * for data frame:
2515 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2516 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2517 		 * tx->initial_rate_index = 1;
2518 		 *
2519 		 * now the txrate is determined in tx cmd flags, set to the
2520 		 * max value 54M for 11g and 11M for 11b.
2521 		 */
2522 
2523 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2524 			rate = ic->ic_fixed_rate;
2525 		} else {
2526 			rate = in->in_rates.ir_rates[in->in_txrate];
2527 		}
2528 	}
2529 	rate &= IEEE80211_RATE_VAL;
2530 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2531 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2532 
2533 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2534 
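	/*
	 * if the tx command plus the 802.11 header does not already end on
	 * a 4-byte boundary, tell the firmware that pad bytes were inserted
	 */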
2535 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2536 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2537 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2538 
2539 	/* retrieve destination node's id */
2540 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2541 		tx->sta_id = IWK_BROADCAST_ID;
2542 	} else {
2543 		if (ic->ic_opmode != IEEE80211_M_IBSS)
2544 			tx->sta_id = IWK_AP_ID;
2545 	}
2546 
2547 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2548 	    IEEE80211_FC0_TYPE_MGT) {
2549 		/* tell h/w to set timestamp in probe responses */
2550 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2551 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2552 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2553 
2554 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2555 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2556 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2557 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2558 			tx->timeout.pm_frame_timeout = 3;
2559 		else
2560 			tx->timeout.pm_frame_timeout = 2;
2561 	} else
2562 		tx->timeout.pm_frame_timeout = 0;
2563 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2564 		masks |= RATE_MCS_CCK_MSK;
2565 
2566 	masks |= RATE_MCS_ANT_B_MSK;
2567 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2568 
2569 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2570 	    tx->tx_flags));
2571 
2572 	tx->rts_retry_limit = 60;
2573 	tx->data_retry_limit = 15;
2574 
2575 	tx->stop_time.life_time  = LE_32(0xffffffff);
2576 
2577 	tx->len = LE_16(len);
2578 
2579 	tx->dram_lsb_ptr =
2580 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2581 	tx->dram_msb_ptr = 0;
2582 	tx->driver_txop = 0;
2583 	tx->next_frame_len = 0;
2584 
2585 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2586 	m->b_rptr += hdrlen;
2587 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2588 
2589 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2590 	    ring->qid, ring->cur, len));
2591 
2592 	/*
2593 	 * first segment includes the tx cmd plus the 802.11 header,
2594 	 * the second includes the remainder of the 802.11 frame.
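	 * pa[0] packs both buffers: tb1_addr holds the command buffer
	 * address; val1 carries tb1's length in bits [15:4] plus the low 16
	 * bits of the payload buffer address; val2 carries the high 16 bits
	 * of that address plus the payload length in bits [31:20].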
2595 	 */
2596 	desc->val0 = LE_32(2 << 24);
2597 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2598 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2599 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2600 	desc->pa[0].val2 =
2601 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2602 	    ((len - hdrlen) << 20);
2603 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2604 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2605 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2606 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2607 
2608 	mutex_enter(&sc->sc_tx_lock);
2609 	ring->queued++;
2610 	mutex_exit(&sc->sc_tx_lock);
2611 
2612 	/* kick ring */
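	/*
	 * update the byte count table; entries below IWK_MAX_WIN_SIZE are
	 * mirrored beyond IWK_QUEUE_SIZE so the scheduler can read a full
	 * window across the ring wrap-around
	 */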
2613 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2614 	    tfd_offset[ring->cur].val = 8 + len;
2615 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2616 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2617 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2618 	}
2619 
2620 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2621 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2622 
2623 	ring->cur = (ring->cur + 1) % ring->count;
2624 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2625 	freemsg(m);
2626 	/* release node reference */
2627 	ieee80211_free_node(in);
2628 
2629 	ic->ic_stats.is_tx_bytes += len;
2630 	ic->ic_stats.is_tx_frags++;
2631 
2632 	if (sc->sc_tx_timer == 0)
2633 		sc->sc_tx_timer = 10;
2634 exit:
2635 	return (err);
2636 }
2637 
2638 static void
2639 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2640 {
2641 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2642 	ieee80211com_t	*ic = &sc->sc_ic;
2643 	int		err;
2644 
2645 	err = ieee80211_ioctl(ic, wq, mp);
2646 
2647 	if (err == ENETRESET) {
2648 		/*
2649 		 * This is special for the hidden AP connection.
2650 		 * In any case, we should make sure there is only one
2651 		 * 'scan' in the driver for a 'connect' CLI command.  So
2652 		 * when connecting to a hidden AP, the scan is only sent
2653 		 * out on the air once we know the desired essid of the
2654 		 * AP we want to connect to.
2655 		 */
2656 		if (ic->ic_des_esslen) {
2657 			(void) ieee80211_new_state(ic,
2658 			    IEEE80211_S_SCAN, -1);
2659 		}
2660 	}
2661 }
2662 
2663 /*
2664  * callback functions for set/get properties
2665  */
2666 static int
2667 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2668     uint_t pr_flags, uint_t wldp_length, void *wldp_buf)
2669 {
2670 	int		err = 0;
2671 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2672 
2673 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
2674 	    pr_flags, wldp_length, wldp_buf);
2675 
2676 	return (err);
2677 }
2678 static int
2679 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2680     uint_t wldp_length, const void *wldp_buf)
2681 {
2682 	int		err;
2683 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2684 	ieee80211com_t	*ic = &sc->sc_ic;
2685 
2686 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
2687 	    wldp_buf);
2688 
2689 	if (err == ENETRESET) {
2690 		if (ic->ic_des_esslen) {
2691 			(void) ieee80211_new_state(ic,
2692 			    IEEE80211_S_SCAN, -1);
2693 		}
2694 
2695 		err = 0;
2696 	}
2697 
2698 	return (err);
2699 }
2700 
2701 /*ARGSUSED*/
2702 static int
2703 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
2704 {
2705 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2706 	ieee80211com_t	*ic = &sc->sc_ic;
2707 	ieee80211_node_t *in = ic->ic_bss;
2708 	struct ieee80211_rateset *rs = &in->in_rates;
2709 
2710 	mutex_enter(&sc->sc_glock);
2711 	switch (stat) {
2712 	case MAC_STAT_IFSPEED:
2713 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
2714 		    (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL)
2715 		    : ic->ic_fixed_rate) / 2 * 1000000;
2716 		break;
2717 	case MAC_STAT_NOXMTBUF:
2718 		*val = sc->sc_tx_nobuf;
2719 		break;
2720 	case MAC_STAT_NORCVBUF:
2721 		*val = sc->sc_rx_nobuf;
2722 		break;
2723 	case MAC_STAT_IERRORS:
2724 		*val = sc->sc_rx_err;
2725 		break;
2726 	case MAC_STAT_RBYTES:
2727 		*val = ic->ic_stats.is_rx_bytes;
2728 		break;
2729 	case MAC_STAT_IPACKETS:
2730 		*val = ic->ic_stats.is_rx_frags;
2731 		break;
2732 	case MAC_STAT_OBYTES:
2733 		*val = ic->ic_stats.is_tx_bytes;
2734 		break;
2735 	case MAC_STAT_OPACKETS:
2736 		*val = ic->ic_stats.is_tx_frags;
2737 		break;
2738 	case MAC_STAT_OERRORS:
2739 	case WIFI_STAT_TX_FAILED:
2740 		*val = sc->sc_tx_err;
2741 		break;
2742 	case WIFI_STAT_TX_RETRANS:
2743 		*val = sc->sc_tx_retries;
2744 		break;
2745 	case WIFI_STAT_FCS_ERRORS:
2746 	case WIFI_STAT_WEP_ERRORS:
2747 	case WIFI_STAT_TX_FRAGS:
2748 	case WIFI_STAT_MCAST_TX:
2749 	case WIFI_STAT_RTS_SUCCESS:
2750 	case WIFI_STAT_RTS_FAILURE:
2751 	case WIFI_STAT_ACK_FAILURE:
2752 	case WIFI_STAT_RX_FRAGS:
2753 	case WIFI_STAT_MCAST_RX:
2754 	case WIFI_STAT_RX_DUPS:
2755 		mutex_exit(&sc->sc_glock);
2756 		return (ieee80211_stat(ic, stat, val));
2757 	default:
2758 		mutex_exit(&sc->sc_glock);
2759 		return (ENOTSUP);
2760 	}
2761 	mutex_exit(&sc->sc_glock);
2762 
2763 	return (IWK_SUCCESS);
2764 
2765 }
2766 
2767 static int
2768 iwk_m_start(void *arg)
2769 {
2770 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2771 	ieee80211com_t	*ic = &sc->sc_ic;
2772 	int err;
2773 
2774 	err = iwk_init(sc);
2775 
2776 	if (err != IWK_SUCCESS) {
2777 		/*
2778 		 * The hw init failed (e.g. the RF is OFF). Return success
2779 		 * so that the 'plumb' succeeds; iwk_thread() retries the
2780 		 * initialization in the background.
2781 		 */
2782 		mutex_enter(&sc->sc_glock);
2783 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2784 		mutex_exit(&sc->sc_glock);
2785 		return (IWK_SUCCESS);
2786 	}
2787 
2788 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2789 
2790 	mutex_enter(&sc->sc_glock);
2791 	sc->sc_flags |= IWK_F_RUNNING;
2792 	mutex_exit(&sc->sc_glock);
2793 
2794 	return (IWK_SUCCESS);
2795 }
2796 
2797 static void
2798 iwk_m_stop(void *arg)
2799 {
2800 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2801 	ieee80211com_t	*ic = &sc->sc_ic;
2802 
2803 	iwk_stop(sc);
2804 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2805 	mutex_enter(&sc->sc_mt_lock);
2806 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2807 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
2808 	mutex_exit(&sc->sc_mt_lock);
2809 	mutex_enter(&sc->sc_glock);
2810 	sc->sc_flags &= ~IWK_F_RUNNING;
2811 	mutex_exit(&sc->sc_glock);
2812 }
2813 
2814 /*ARGSUSED*/
2815 static int
2816 iwk_m_unicst(void *arg, const uint8_t *macaddr)
2817 {
2818 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2819 	ieee80211com_t	*ic = &sc->sc_ic;
2820 	int err;
2821 
2822 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
2823 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
2824 		mutex_enter(&sc->sc_glock);
2825 		err = iwk_config(sc);
2826 		mutex_exit(&sc->sc_glock);
2827 		if (err != IWK_SUCCESS) {
2828 			cmn_err(CE_WARN,
2829 			    "iwk_m_unicst(): "
2830 			    "failed to configure device\n");
2831 			goto fail;
2832 		}
2833 	}
2834 	return (IWK_SUCCESS);
2835 fail:
2836 	return (err);
2837 }
2838 
2839 /*ARGSUSED*/
2840 static int
2841 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
2842 {
2843 	return (IWK_SUCCESS);
2844 }
2845 
2846 /*ARGSUSED*/
2847 static int
2848 iwk_m_promisc(void *arg, boolean_t on)
2849 {
2850 	return (IWK_SUCCESS);
2851 }
2852 
2853 static void
2854 iwk_thread(iwk_sc_t *sc)
2855 {
2856 	ieee80211com_t	*ic = &sc->sc_ic;
2857 	clock_t clk;
2858 	int times = 0, err, n = 0, timeout = 0;
2859 	uint32_t tmp;
2860 
2861 	mutex_enter(&sc->sc_mt_lock);
2862 	while (sc->sc_mf_thread_switch) {
2863 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
2864 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
2865 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
2866 		} else {
2867 			sc->sc_flags |= IWK_F_RADIO_OFF;
2868 		}
2869 		/*
2870 		 * If in SUSPEND or the RF is OFF, do nothing
2871 		 */
2872 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
2873 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
2874 			mutex_exit(&sc->sc_mt_lock);
2875 			delay(drv_usectohz(100000));
2876 			mutex_enter(&sc->sc_mt_lock);
2877 			continue;
2878 		}
2879 
2880 		/*
2881 		 * recover from a fatal error
2882 		 */
2883 		if (ic->ic_mach &&
2884 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
2885 
2886 			IWK_DBG((IWK_DEBUG_FW,
2887 			    "iwk_thread(): "
2888 			    "try to recover fatal hw error: %d\n", times++));
2889 
2890 			iwk_stop(sc);
2891 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2892 
2893 			mutex_exit(&sc->sc_mt_lock);
2894 			delay(drv_usectohz(2000000 + n*500000));
2895 			mutex_enter(&sc->sc_mt_lock);
2896 			err = iwk_init(sc);
2897 			if (err != IWK_SUCCESS) {
2898 				n++;
2899 				if (n < 20)
2900 					continue;
2901 			}
2902 			n = 0;
2903 			if (!err)
2904 				sc->sc_flags |= IWK_F_RUNNING;
2905 			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2906 			mutex_exit(&sc->sc_mt_lock);
2907 			delay(drv_usectohz(2000000));
2908 			if (sc->sc_ostate != IEEE80211_S_INIT)
2909 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
2910 			mutex_enter(&sc->sc_mt_lock);
2911 		}
2912 
2913 		/*
2914 		 * rate ctl
2915 		 */
2916 		if (ic->ic_mach &&
2917 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
2918 			clk = ddi_get_lbolt();
2919 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
2920 				iwk_amrr_timeout(sc);
2921 			}
2922 		}
2923 
2924 		mutex_exit(&sc->sc_mt_lock);
2925 		delay(drv_usectohz(100000));
2926 		mutex_enter(&sc->sc_mt_lock);
2927 
2928 		if (sc->sc_tx_timer) {
2929 			timeout++;
2930 			if (timeout == 10) {
2931 				sc->sc_tx_timer--;
2932 				if (sc->sc_tx_timer == 0) {
2933 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2934 					sc->sc_ostate = IEEE80211_S_RUN;
2935 					IWK_DBG((IWK_DEBUG_FW,
2936 					    "iwk_thread(): try to recover from"
2937 					    " 'send fail'\n"));
2938 				}
2939 				timeout = 0;
2940 			}
2941 		}
2942 
2943 	}
2944 	sc->sc_mf_thread = NULL;
2945 	cv_signal(&sc->sc_mt_cv);
2946 	mutex_exit(&sc->sc_mt_lock);
2947 }
2948 
2949 
2950 /*
2951  * Send a command to the firmware.
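 * The caller must hold sc_glock.  When 'async' is zero this routine also
 * waits up to two seconds on sc_cmd_cv for the IWK_F_CMD_DONE flag that
 * iwk_cmd_intr() sets once the firmware acknowledges the command.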
2952  */
2953 static int
2954 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
2955 {
2956 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
2957 	iwk_tx_desc_t *desc;
2958 	iwk_cmd_t *cmd;
2959 	clock_t clk;
2960 
2961 	ASSERT(size <= sizeof (cmd->data));
2962 	ASSERT(mutex_owned(&sc->sc_glock));
2963 
2964 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
2965 	desc = ring->data[ring->cur].desc;
2966 	cmd = ring->data[ring->cur].cmd;
2967 
2968 	cmd->hdr.type = (uint8_t)code;
2969 	cmd->hdr.flags = 0;
2970 	cmd->hdr.qid = ring->qid;
2971 	cmd->hdr.idx = ring->cur;
2972 	(void) memcpy(cmd->data, buf, size);
2973 	(void) memset(desc, 0, sizeof (*desc));
2974 
2975 	desc->val0 = LE_32(1 << 24);
2976 	desc->pa[0].tb1_addr =
2977 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
2978 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
2979 
2980 	/* kick cmd ring XXX */
2981 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2982 	    tfd_offset[ring->cur].val = 8;
2983 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2984 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2985 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
2986 	}
2987 	ring->cur = (ring->cur + 1) % ring->count;
2988 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2989 
2990 	if (async)
2991 		return (IWK_SUCCESS);
2992 	else {
2993 		sc->sc_flags &= ~IWK_F_CMD_DONE;
2994 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
2995 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
2996 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
2997 			    0)
2998 				break;
2999 		}
3000 		if (sc->sc_flags & IWK_F_CMD_DONE)
3001 			return (IWK_SUCCESS);
3002 		else
3003 			return (IWK_FAIL);
3004 	}
3005 }
3006 
3007 static void
3008 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3009 {
3010 	iwk_led_cmd_t led;
3011 
3012 	led.interval = LE_32(100000);	/* unit: 100ms */
3013 	led.id = id;
3014 	led.off = off;
3015 	led.on = on;
3016 
3017 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3018 }
3019 
3020 static int
3021 iwk_hw_set_before_auth(iwk_sc_t *sc)
3022 {
3023 	ieee80211com_t *ic = &sc->sc_ic;
3024 	ieee80211_node_t *in = ic->ic_bss;
3025 	iwk_add_sta_t node;
3026 	iwk_link_quality_cmd_t link_quality;
3027 	struct ieee80211_rateset rs;
3028 	uint16_t masks = 0, rate;
3029 	int i, err;
3030 
3031 	/* update the adapter's configuration according to the info of the target AP */
3032 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3033 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
3034 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
3035 		sc->sc_config.cck_basic_rates  = 0x03;
3036 		sc->sc_config.ofdm_basic_rates = 0;
3037 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3038 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3039 		sc->sc_config.cck_basic_rates  = 0;
3040 		sc->sc_config.ofdm_basic_rates = 0x15;
3041 	} else { /* assume 802.11b/g */
3042 		sc->sc_config.cck_basic_rates  = 0x0f;
3043 		sc->sc_config.ofdm_basic_rates = 0xff;
3044 	}
3045 
3046 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3047 	    RXON_FLG_SHORT_SLOT_MSK);
3048 
3049 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3050 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3051 	else
3052 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3053 
3054 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3055 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3056 	else
3057 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3058 
3059 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3060 	    "filter_flags %x  cck %x ofdm %x"
3061 	    " bssid:%02x:%02x:%02x:%02x:%02x:%02x\n",
3062 	    sc->sc_config.chan, sc->sc_config.flags,
3063 	    sc->sc_config.filter_flags,
3064 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3065 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3066 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3067 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3068 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3069 	    sizeof (iwk_rxon_cmd_t), 1);
3070 	if (err != IWK_SUCCESS) {
3071 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3072 		    " failed to config chan%d\n",
3073 		    sc->sc_config.chan);
3074 		return (err);
3075 	}
3076 
3077 	/* obtain current temperature of chipset */
3078 	sc->sc_tempera = iwk_curr_tempera(sc);
3079 
3080 	/* perform Tx power calibration to determine the DSP and radio gains */
3081 	err = iwk_tx_power_calibration(sc);
3082 	if (err) {
3083 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3084 		    "failed to set tx power table\n");
3085 		return (err);
3086 	}
3087 
3088 	/* add default AP node */
3089 	(void) memset(&node, 0, sizeof (node));
3090 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3091 	node.id = IWK_AP_ID;
3092 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3093 	if (err != IWK_SUCCESS) {
3094 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3095 		    "failed to add BSS node\n");
3096 		return (err);
3097 	}
3098 
3099 	/* TX_LINK_QUALITY cmd ? */
3100 	(void) memset(&link_quality, 0, sizeof (link_quality));
3101 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3102 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3103 		if (i < rs.ir_nrates)
3104 			rate = rs.ir_rates[rs.ir_nrates - i];
3105 		else
3106 			rate = 2;
3107 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3108 			masks |= RATE_MCS_CCK_MSK;
3109 		masks |= RATE_MCS_ANT_B_MSK;
3110 		masks &= ~RATE_MCS_ANT_A_MSK;
3111 		link_quality.rate_n_flags[i] =
3112 		    iwk_rate_to_plcp(rate) | masks;
3113 	}
3114 
3115 	link_quality.general_params.single_stream_ant_msk = 2;
3116 	link_quality.general_params.dual_stream_ant_msk = 3;
3117 	link_quality.agg_params.agg_dis_start_th = 3;
3118 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3119 	link_quality.sta_id = IWK_AP_ID;
3120 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3121 	    sizeof (link_quality), 1);
3122 	if (err != IWK_SUCCESS) {
3123 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3124 		    "failed to config link quality table\n");
3125 		return (err);
3126 	}
3127 
3128 	return (IWK_SUCCESS);
3129 }
3130 
3131 /*
3132  * Send a scan request (assemble the scan cmd) to the firmware.
3133  */
3134 static int
3135 iwk_scan(iwk_sc_t *sc)
3136 {
3137 	ieee80211com_t *ic = &sc->sc_ic;
3138 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3139 	iwk_tx_desc_t *desc;
3140 	iwk_tx_data_t *data;
3141 	iwk_cmd_t *cmd;
3142 	iwk_scan_hdr_t *hdr;
3143 	iwk_scan_chan_t *chan;
3144 	struct ieee80211_frame *wh;
3145 	ieee80211_node_t *in = ic->ic_bss;
3146 	struct ieee80211_rateset *rs;
3147 	enum ieee80211_phymode mode;
3148 	uint8_t *frm;
3149 	int i, pktlen, nrates;
3150 
3151 	sc->sc_flags |= IWK_F_SCANNING;
3152 
3153 	data = &ring->data[ring->cur];
3154 	desc = data->desc;
3155 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3156 
3157 	cmd->hdr.type = REPLY_SCAN_CMD;
3158 	cmd->hdr.flags = 0;
3159 	cmd->hdr.qid = ring->qid;
3160 	cmd->hdr.idx = ring->cur | 0x40;
3161 
3162 	hdr = (iwk_scan_hdr_t *)cmd->data;
3163 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3164 	hdr->nchan = 11;
3165 	hdr->quiet_time = LE_16(5);
3166 	hdr->quiet_plcp_th = LE_16(1);
3167 
3168 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3169 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3170 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3171 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3172 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3173 
3174 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3175 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3176 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3177 	hdr->tx_cmd.tx_flags |= (0x200);
3178 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3179 	hdr->tx_cmd.rate.r.rate_n_flags |=
3180 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3181 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3182 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3183 
3184 	if (ic->ic_des_esslen)
3185 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3186 		    ic->ic_des_esslen);
3187 	else
3188 		bzero(hdr->direct_scan[0].ssid,
3189 		    sizeof (hdr->direct_scan[0].ssid));
3190 	/*
3191 	 * a probe request frame is required after the REPLY_SCAN_CMD
3192 	 */
3193 	wh = (struct ieee80211_frame *)(hdr + 1);
3194 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3195 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3196 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3197 	(void) memset(wh->i_addr1, 0xff, 6);
3198 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3199 	(void) memset(wh->i_addr3, 0xff, 6);
3200 	*(uint16_t *)&wh->i_dur[0] = 0;
3201 	*(uint16_t *)&wh->i_seq[0] = 0;
3202 
3203 	frm = (uint8_t *)(wh + 1);
3204 
3205 	/* essid IE */
3206 	*frm++ = IEEE80211_ELEMID_SSID;
3207 	*frm++ = in->in_esslen;
3208 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3209 	frm += in->in_esslen;
3210 
3211 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3212 	rs = &ic->ic_sup_rates[mode];
3213 
3214 	/* supported rates IE */
3215 	*frm++ = IEEE80211_ELEMID_RATES;
3216 	nrates = rs->ir_nrates;
3217 	if (nrates > IEEE80211_RATE_SIZE)
3218 		nrates = IEEE80211_RATE_SIZE;
3219 	*frm++ = (uint8_t)nrates;
3220 	(void) memcpy(frm, rs->ir_rates, nrates);
3221 	frm += nrates;
3222 
3223 	/* supported xrates IE */
3224 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3225 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3226 		*frm++ = IEEE80211_ELEMID_XRATES;
3227 		*frm++ = (uint8_t)nrates;
3228 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3229 		frm += nrates;
3230 	}
3231 
3232 	/* optional IE (usually for WPA) */
3233 	if (ic->ic_opt_ie != NULL) {
3234 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3235 		frm += ic->ic_opt_ie_len;
3236 	}
3237 
3238 	/* setup length of probe request */
3239 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3240 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3241 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3242 
3243 	/*
3244 	 * the attributes of the scan channels are appended after the probe
3245 	 * request frame.
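	 * only the 2.4 GHz channels 1-11 are covered here, each with a
	 * fixed tx gain and the dwell times set below.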
3246 	 */
3247 	chan = (iwk_scan_chan_t *)frm;
3248 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3249 		chan->type = 3;
3250 		chan->chan = (uint8_t)i;
3251 		chan->tpc.tx_gain = 0x3f;
3252 		chan->tpc.dsp_atten = 110;
3253 		chan->active_dwell = LE_16(20);
3254 		chan->passive_dwell = LE_16(120);
3255 
3256 		frm += sizeof (iwk_scan_chan_t);
3257 	}
3258 
3259 	pktlen = _PTRDIFF(frm, cmd);
3260 
3261 	(void) memset(desc, 0, sizeof (*desc));
3262 	desc->val0 = LE_32(1 << 24);
3263 	desc->pa[0].tb1_addr =
3264 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3265 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3266 
3267 	/*
3268 	 * filling the byte count table may not be necessary for commands,
3269 	 * but we fill it here anyway.
3270 	 */
3271 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3272 	    tfd_offset[ring->cur].val = 8;
3273 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3274 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3275 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3276 	}
3277 
3278 	/* kick cmd ring */
3279 	ring->cur = (ring->cur + 1) % ring->count;
3280 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3281 
3282 	return (IWK_SUCCESS);
3283 }
3284 
3285 static int
3286 iwk_config(iwk_sc_t *sc)
3287 {
3288 	ieee80211com_t *ic = &sc->sc_ic;
3289 	iwk_powertable_cmd_t powertable;
3290 	iwk_bt_cmd_t bt;
3291 	iwk_add_sta_t node;
3292 	iwk_link_quality_cmd_t link_quality;
3293 	int i, err;
3294 	uint16_t masks = 0;
3295 
3296 	/*
3297 	 * set power mode. Power management is disabled for now; enable it later
3298 	 */
3299 	(void) memset(&powertable, 0, sizeof (powertable));
3300 	powertable.flags = LE_16(0x8);
3301 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3302 	    sizeof (powertable), 0);
3303 	if (err != IWK_SUCCESS) {
3304 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3305 		return (err);
3306 	}
3307 
3308 	/* configure bt coexistence */
3309 	(void) memset(&bt, 0, sizeof (bt));
3310 	bt.flags = 3;
3311 	bt.lead_time = 0xaa;
3312 	bt.max_kill = 1;
3313 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3314 	    sizeof (bt), 0);
3315 	if (err != IWK_SUCCESS) {
3316 		cmn_err(CE_WARN,
3317 		    "iwk_config(): "
3318 		    "failed to configure bt coexistence\n");
3319 		return (err);
3320 	}
3321 
3322 	/* configure rxon */
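	/*
	 * the RXON command carries the basic operating parameters: our MAC
	 * address, channel, band flags and receive filters.  It is sent
	 * again in iwk_hw_set_before_auth() once a target AP is known.
	 */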
3323 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3324 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3325 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3326 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3327 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3328 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3329 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3330 	switch (ic->ic_opmode) {
3331 	case IEEE80211_M_STA:
3332 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3333 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3334 		    RXON_FILTER_DIS_DECRYPT_MSK |
3335 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3336 		break;
3337 	case IEEE80211_M_AHDEMO:
3338 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3339 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3340 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3341 		    RXON_FILTER_DIS_DECRYPT_MSK |
3342 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3343 		break;
3344 	case IEEE80211_M_HOSTAP:
3345 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3346 		break;
3347 	case IEEE80211_M_MONITOR:
3348 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3349 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3350 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3351 		break;
3352 	}
3353 	sc->sc_config.cck_basic_rates  = 0x0f;
3354 	sc->sc_config.ofdm_basic_rates = 0xff;
3355 
3356 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3357 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3358 
3359 	/* set antenna */
3360 
3361 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3362 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3363 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3364 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3365 
3366 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3367 	    sizeof (iwk_rxon_cmd_t), 0);
3368 	if (err != IWK_SUCCESS) {
3369 		cmn_err(CE_WARN, "iwk_config(): "
3370 		    "failed to set configure command\n");
3371 		return (err);
3372 	}
3373 	/* obtain current temperature of chipset */
3374 	sc->sc_tempera = iwk_curr_tempera(sc);
3375 
3376 	/* perform Tx power calibration to determine the DSP and radio gains */
3377 	err = iwk_tx_power_calibration(sc);
3378 	if (err) {
3379 		cmn_err(CE_WARN, "iwk_config(): "
3380 		    "failed to set tx power table\n");
3381 		return (err);
3382 	}
3383 
3384 	/* add broadcast node so that we can send broadcast frames */
3385 	(void) memset(&node, 0, sizeof (node));
3386 	(void) memset(node.bssid, 0xff, 6);
3387 	node.id = IWK_BROADCAST_ID;
3388 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3389 	if (err != IWK_SUCCESS) {
3390 		cmn_err(CE_WARN, "iwk_config(): "
3391 		    "failed to add broadcast node\n");
3392 		return (err);
3393 	}
3394 
3395 	/* TX_LINK_QUALITY cmd ? */
3396 	(void) memset(&link_quality, 0, sizeof (link_quality));
3397 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3398 		masks |= RATE_MCS_CCK_MSK;
3399 		masks |= RATE_MCS_ANT_B_MSK;
3400 		masks &= ~RATE_MCS_ANT_A_MSK;
3401 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3402 	}
3403 
3404 	link_quality.general_params.single_stream_ant_msk = 2;
3405 	link_quality.general_params.dual_stream_ant_msk = 3;
3406 	link_quality.agg_params.agg_dis_start_th = 3;
3407 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3408 	link_quality.sta_id = IWK_BROADCAST_ID;
3409 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3410 	    sizeof (link_quality), 0);
3411 	if (err != IWK_SUCCESS) {
3412 		cmn_err(CE_WARN, "iwk_config(): "
3413 		    "failed to config link quality table\n");
3414 		return (err);
3415 	}
3416 
3417 	return (IWK_SUCCESS);
3418 }
3419 
3420 static void
3421 iwk_stop_master(iwk_sc_t *sc)
3422 {
3423 	uint32_t tmp;
3424 	int n;
3425 
3426 	tmp = IWK_READ(sc, CSR_RESET);
3427 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3428 
3429 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3430 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3431 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3432 		return;
3433 
3434 	for (n = 0; n < 2000; n++) {
3435 		if (IWK_READ(sc, CSR_RESET) &
3436 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3437 			break;
3438 		DELAY(1000);
3439 	}
3440 	if (n == 2000)
3441 		IWK_DBG((IWK_DEBUG_HW,
3442 		    "timeout waiting for master stop\n"));
3443 }
3444 
3445 static int
3446 iwk_power_up(iwk_sc_t *sc)
3447 {
3448 	uint32_t tmp;
3449 
3450 	iwk_mac_access_enter(sc);
3451 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3452 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3453 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3454 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3455 	iwk_mac_access_exit(sc);
3456 
3457 	DELAY(5000);
3458 	return (IWK_SUCCESS);
3459 }
3460 
3461 static int
3462 iwk_preinit(iwk_sc_t *sc)
3463 {
3464 	uint32_t tmp;
3465 	int n;
3466 	uint8_t vlink;
3467 
3468 	/* clear any pending interrupts */
3469 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3470 
3471 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3472 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3473 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3474 
3475 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3476 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3477 
3478 	/* wait for clock ready */
3479 	for (n = 0; n < 1000; n++) {
3480 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3481 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3482 			break;
3483 		DELAY(10);
3484 	}
3485 	if (n == 1000) {
3486 		return (ETIMEDOUT);
3487 	}
3488 	iwk_mac_access_enter(sc);
3489 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3490 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3491 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3492 
3493 	DELAY(20);
3494 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3495 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3496 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3497 	iwk_mac_access_exit(sc);
3498 
3499 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3500 
3501 	(void) iwk_power_up(sc);
3502 
3503 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3504 		tmp = ddi_get32(sc->sc_cfg_handle,
3505 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3506 		ddi_put32(sc->sc_cfg_handle,
3507 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3508 		    tmp & ~(1 << 11));
3509 	}
3510 
3511 
3512 	vlink = ddi_get8(sc->sc_cfg_handle,
3513 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3514 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3515 	    vlink & ~2);
3516 
3517 	tmp = IWK_READ(sc, CSR_SW_VER);
3518 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3519 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3520 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3521 	IWK_WRITE(sc, CSR_SW_VER, tmp);
3522 
3523 	/* make sure power is supplied to each part of the hardware */
3524 	iwk_mac_access_enter(sc);
3525 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3526 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3527 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3528 	DELAY(5);
3529 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3530 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3531 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3532 	iwk_mac_access_exit(sc);
3533 	return (IWK_SUCCESS);
3534 }
3535 
3536 /*
3537  * set up the semaphore flag to own the EEPROM
3538  */
3539 static int iwk_eep_sem_down(iwk_sc_t *sc)
3540 {
3541 	int count1, count2;
3542 	uint32_t tmp;
3543 
3544 	for (count1 = 0; count1 < 1000; count1++) {
3545 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3546 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3547 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3548 
3549 		for (count2 = 0; count2 < 2; count2++) {
3550 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3551 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
3552 				return (IWK_SUCCESS);
3553 			DELAY(10000);
3554 		}
3555 	}
3556 	return (IWK_FAIL);
3557 }
3558 
3559 /*
3560  * reset the semaphore flag to release the EEPROM
3561  */
3562 static void iwk_eep_sem_up(iwk_sc_t *sc)
3563 {
3564 	uint32_t tmp;
3565 
3566 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3567 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3568 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3569 }
3570 
3571 /*
3572  * This function loads all information from the EEPROM into the iwk_eep
3573  * structure embedded in the iwk_sc_t structure
3574  */
3575 static int iwk_eep_load(iwk_sc_t *sc)
3576 {
3577 	int i, rr;
3578 	uint32_t rv, tmp, eep_gp;
3579 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
3580 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
3581 
3582 	/* read eeprom gp register in CSR */
3583 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
3584 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
3585 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
3586 		IWK_DBG((IWK_DEBUG_EEPROM, "EEPROM not found\n"));
3587 		return (IWK_FAIL);
3588 	}
3589 
3590 	rr = iwk_eep_sem_down(sc);
3591 	if (rr != 0) {
3592 		IWK_DBG((IWK_DEBUG_EEPROM, "driver failed to own EEPROM\n"));
3593 		return (IWK_FAIL);
3594 	}
3595 
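	/*
	 * read the EEPROM one 16-bit word at a time: write the word address
	 * (shifted left by 1) to CSR_EEPROM_REG, then poll bit 0 for the
	 * read-done indication; the data comes back in the upper 16 bits.
	 */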
3596 	for (addr = 0; addr < eep_sz; addr += 2) {
3597 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
3598 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
3599 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
3600 
3601 		for (i = 0; i < 10; i++) {
3602 			rv = IWK_READ(sc, CSR_EEPROM_REG);
3603 			if (rv & 1)
3604 				break;
3605 			DELAY(10);
3606 		}
3607 
3608 		if (!(rv & 1)) {
3609 			IWK_DBG((IWK_DEBUG_EEPROM,
3610 			    "timeout when reading eeprom\n"));
3611 			iwk_eep_sem_up(sc);
3612 			return (IWK_FAIL);
3613 		}
3614 
3615 		eep_p[addr/2] = rv >> 16;
3616 	}
3617 
3618 	iwk_eep_sem_up(sc);
3619 	return (IWK_SUCCESS);
3620 }
3621 
3622 /*
3623  * init mac address in ieee80211com_t struct
3624  */
3625 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3626 {
3627 	ieee80211com_t *ic = &sc->sc_ic;
3628 	struct iwk_eep *ep = &sc->sc_eep_map;
3629 
3630 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3631 
3632 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
3633 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
3634 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
3635 }
3636 
3637 static int
3638 iwk_init(iwk_sc_t *sc)
3639 {
3640 	int qid, n, err;
3641 	clock_t clk;
3642 	uint32_t tmp;
3643 
3644 	mutex_enter(&sc->sc_glock);
3645 	sc->sc_flags &= ~IWK_F_FW_INIT;
3646 
3647 	(void) iwk_preinit(sc);
3648 
3649 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3650 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
3651 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
3652 		goto fail1;
3653 	}
3654 
3655 	/* init Rx ring */
3656 	iwk_mac_access_enter(sc);
3657 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
3658 
3659 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
3660 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
3661 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
3662 
3663 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
3664 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
3665 	    offsetof(struct iwk_shared, val0)) >> 4));
3666 
3667 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
3668 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
3669 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
3670 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
3671 	    (RX_QUEUE_SIZE_LOG <<
3672 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
3673 	iwk_mac_access_exit(sc);
3674 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
3675 	    (RX_QUEUE_SIZE - 1) & ~0x7);
3676 
3677 	/* init Tx rings */
3678 	iwk_mac_access_enter(sc);
3679 	iwk_reg_write(sc, SCD_TXFACT, 0);
3680 
3681 	/* keep warm page */
3682 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
3683 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
3684 
3685 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
3686 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
3687 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
3688 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
3689 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3690 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3691 	}
3692 	iwk_mac_access_exit(sc);
3693 
3694 	/* clear "radio off" and "disable command" bits */
3695 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3696 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
3697 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3698 
3699 	/* clear any pending interrupts */
3700 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3701 
3702 	/* enable interrupts */
3703 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
3704 
3705 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3706 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3707 
3708 	/*
3709 	 * back up the uCode data section for future use.
3710 	 */
3711 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
3712 	    sc->sc_dma_fw_data.mem_va,
3713 	    sc->sc_dma_fw_data.alength);
3714 
3715 	for (n = 0; n < 2; n++) {
3716 		/* load firmware init segment into NIC */
3717 		err = iwk_load_firmware(sc);
3718 		if (err != IWK_SUCCESS) {
3719 			cmn_err(CE_WARN, "iwk_init(): "
3720 			    "failed to setup boot firmware\n");
3721 			continue;
3722 		}
3723 
3724 		/* now press "execute" to start running */
3725 		IWK_WRITE(sc, CSR_RESET, 0);
3726 		break;
3727 	}
3728 	if (n == 2) {
3729 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
3730 		goto fail1;
3731 	}
3732 	/* ..and wait at most two seconds for the adapter to initialize */
3733 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
3734 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
3735 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
3736 			break;
3737 	}
3738 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
3739 		cmn_err(CE_WARN,
3740 		    "iwk_init(): timeout waiting for firmware init\n");
3741 		goto fail1;
3742 	}
3743 
3744 	/*
3745 	 * at this point, the firmware is loaded OK, then config the hardware
3746 	 * with the ucode API, including rxon, txpower, etc.
3747 	 */
3748 	err = iwk_config(sc);
3749 	if (err) {
3750 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
3751 		goto fail1;
3752 	}
3753 
3754 	/* at this point, hardware may receive beacons :) */
3755 	mutex_exit(&sc->sc_glock);
3756 	return (IWK_SUCCESS);
3757 
3758 fail1:
3759 	err = IWK_FAIL;
3760 	mutex_exit(&sc->sc_glock);
3761 	return (err);
3762 }
3763 
3764 static void
3765 iwk_stop(iwk_sc_t *sc)
3766 {
3767 	uint32_t tmp;
3768 	int i;
3769 
3770 
3771 	mutex_enter(&sc->sc_glock);
3772 
3773 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3774 	/* disable interrupts */
3775 	IWK_WRITE(sc, CSR_INT_MASK, 0);
3776 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
3777 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
3778 
3779 	/* reset all Tx rings */
3780 	for (i = 0; i < IWK_NUM_QUEUES; i++)
3781 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
3782 
3783 	/* reset Rx ring */
3784 	iwk_reset_rx_ring(sc);
3785 
3786 	iwk_mac_access_enter(sc);
3787 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
3788 	iwk_mac_access_exit(sc);
3789 
3790 	DELAY(5);
3791 
3792 	iwk_stop_master(sc);
3793 
3794 	sc->sc_tx_timer = 0;
3795 	tmp = IWK_READ(sc, CSR_RESET);
3796 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
3797 	mutex_exit(&sc->sc_glock);
3798 }
3799 
3800 /*
3801  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
3802  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
3803  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
3804  * INRIA Sophia - Projet Planete
3805  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
3806  */
3807 #define	is_success(amrr)	\
3808 	((amrr)->retrycnt < (amrr)->txcnt / 10)
3809 #define	is_failure(amrr)	\
3810 	((amrr)->retrycnt > (amrr)->txcnt / 3)
3811 #define	is_enough(amrr)		\
3812 	((amrr)->txcnt > 100)
3813 #define	is_min_rate(in)		\
3814 	((in)->in_txrate == 0)
3815 #define	is_max_rate(in)		\
3816 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
3817 #define	increase_rate(in)	\
3818 	((in)->in_txrate++)
3819 #define	decrease_rate(in)	\
3820 	((in)->in_txrate--)
3821 #define	reset_cnt(amrr)		\
3822 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
3823 
3824 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
3825 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
3826 
3827 static void
3828 iwk_amrr_init(iwk_amrr_t *amrr)
3829 {
3830 	amrr->success = 0;
3831 	amrr->recovery = 0;
3832 	amrr->txcnt = amrr->retrycnt = 0;
3833 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3834 }
3835 
3836 static void
3837 iwk_amrr_timeout(iwk_sc_t *sc)
3838 {
3839 	ieee80211com_t *ic = &sc->sc_ic;
3840 
3841 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
3842 	if (ic->ic_opmode == IEEE80211_M_STA)
3843 		iwk_amrr_ratectl(NULL, ic->ic_bss);
3844 	else
3845 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
3846 	sc->sc_clk = ddi_get_lbolt();
3847 }
3848 
3849 /* ARGSUSED */
3850 static void
3851 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
3852 {
3853 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
3854 	int need_change = 0;
3855 
3856 	if (is_success(amrr) && is_enough(amrr)) {
3857 		amrr->success++;
3858 		if (amrr->success >= amrr->success_threshold &&
3859 		    !is_max_rate(in)) {
3860 			amrr->recovery = 1;
3861 			amrr->success = 0;
3862 			increase_rate(in);
3863 			IWK_DBG((IWK_DEBUG_RATECTL,
3864 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
3865 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
3866 			need_change = 1;
3867 		} else {
3868 			amrr->recovery = 0;
3869 		}
3870 	} else if (is_failure(amrr)) {
3871 		amrr->success = 0;
3872 		if (!is_min_rate(in)) {
3873 			if (amrr->recovery) {
3874 				amrr->success_threshold++;
3875 				if (amrr->success_threshold >
3876 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
3877 					amrr->success_threshold =
3878 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
3879 			} else {
3880 				amrr->success_threshold =
3881 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3882 			}
3883 			decrease_rate(in);
3884 			IWK_DBG((IWK_DEBUG_RATECTL,
3885 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
3886 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
3887 			need_change = 1;
3888 		}
3889 		amrr->recovery = 0;	/* paper is incorrect */
3890 	}
3891 
3892 	if (is_enough(amrr) || need_change)
3893 		reset_cnt(amrr);
3894 }
3895 
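/*
 * Illustrative AMRR walk-through (made-up numbers, not driver code): with
 * the thresholds above, rate decisions are only taken once at least 100
 * frames have been sent (is_enough).  A window with fewer than 10% retries
 * counts as a success and, after success_threshold consecutive successes,
 * the rate is raised one step; a window with more than 33% retries lowers
 * the rate immediately.  For example, assuming txcnt = 120 and retrycnt = 8:
 *
 *	is_enough:  120 > 100			-> true
 *	is_success: 8 < 120 / 10 (= 12)		-> true, success++
 *	is_failure: 8 > 120 / 3  (= 40)		-> false
 *
 * so the node moves one entry up in in_rates once success reaches the
 * current success_threshold, and both counters are then reset.
 */
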
3896 /*
3897  * Calculate the 4965 chipset's temperature in kelvin according to
3898  * the data in the init alive and statistics notifications.
3899  * The details are described in the iwk_calibration.h file
3900  */
3901 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
3902 {
3903 	int32_t  tempera;
3904 	int32_t  r1, r2, r3;
3905 	uint32_t  r4_u;
3906 	int32_t   r4_s;
3907 
3908 	if (iwk_is_fat_channel(sc)) {
3909 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
3910 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
3911 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
3912 		r4_u = sc->sc_card_alive_init.therm_r4[1];
3913 	} else {
3914 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
3915 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
3916 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
3917 		r4_u = sc->sc_card_alive_init.therm_r4[0];
3918 	}
3919 
3920 	if (sc->sc_flags & IWK_F_STATISTICS) {
3921 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
3922 		    (31-23)) >> (31-23);
3923 	} else {
3924 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
3925 	}
3926 
3927 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
3928 	    r1, r2, r3, r4_s));
3929 
3930 	if (r3 == r1) {
3931 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
3932 		    "failed to calculate temperature "
3933 		    "because r3 = r1\n");
3934 		return (DDI_FAILURE);
3935 	}
3936 
3937 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
3938 	tempera /= (r3 - r1);
3939 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
3940 
3941 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
3942 	    tempera, KELVIN_TO_CELSIUS(tempera)));
3943 
3944 	return (tempera);
3945 }
3946 
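/*
 * Illustrative temperature calculation (numbers are made up, not from real
 * hardware): with calibration constants R1..R3 from the "alive"
 * notification and R4 taken from the statistics (sign-extended from 24
 * bits), the function above computes
 *
 *	tempera = TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1);
 *	tempera = tempera * 97 / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
 *
 * e.g. if (R4 - R2) / (R3 - R1) evaluated to 3, the raw value would be
 * 3 * TEMPERATURE_CALIB_A_VAL, then scaled by 97% and offset into kelvin.
 * KELVIN_TO_CELSIUS() is only used for the debug printout.
 */
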
3947 /* Determine whether 4965 is using 2.4 GHz band */
3948 static inline int iwk_is_24G_band(iwk_sc_t *sc)
3949 {
3950 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
3951 }
3952 
3953 /* Determine whether 4965 is using fat channel */
3954 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
3955 {
3956 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
3957 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
3958 }
3959 
3960 /*
3961  * In MIMO mode, determine which group the 4965's current channel belongs to.
3962  * For more information about "channel group",
3963  * please refer to the iwk_calibration.h file
3964  */
3965 static int iwk_txpower_grp(uint16_t channel)
3966 {
3967 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
3968 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
3969 		return (CALIB_CH_GROUP_5);
3970 	}
3971 
3972 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
3973 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
3974 		return (CALIB_CH_GROUP_1);
3975 	}
3976 
3977 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
3978 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
3979 		return (CALIB_CH_GROUP_2);
3980 	}
3981 
3982 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
3983 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
3984 		return (CALIB_CH_GROUP_3);
3985 	}
3986 
3987 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
3988 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
3989 		return (CALIB_CH_GROUP_4);
3990 	}
3991 
3992 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
3993 	    "can't find txpower group for channel %d.\n", channel);
3994 
3995 	return (DDI_FAILURE);
3996 }
3997 
3998 /* 2.4 GHz */
3999 static uint16_t iwk_eep_band_1[14] = {
4000 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4001 };
4002 
4003 /* 5.2 GHz bands */
4004 static uint16_t iwk_eep_band_2[13] = {
4005 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4006 };
4007 
4008 static uint16_t iwk_eep_band_3[12] = {
4009 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4010 };
4011 
4012 static uint16_t iwk_eep_band_4[11] = {
4013 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4014 };
4015 
4016 static uint16_t iwk_eep_band_5[6] = {
4017 	145, 149, 153, 157, 161, 165
4018 };
4019 
4020 static uint16_t iwk_eep_band_6[7] = {
4021 	1, 2, 3, 4, 5, 6, 7
4022 };
4023 
4024 static uint16_t iwk_eep_band_7[11] = {
4025 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4026 };
4027 
4028 /* Get regulatory data from eeprom for a given channel */
4029 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4030     uint16_t channel,
4031     int is_24G, int is_fat, int is_hi_chan)
4032 {
4033 	int32_t i;
4034 	uint16_t chan;
4035 
4036 	if (is_fat) {  /* 11n mode */
4037 
4038 		if (is_hi_chan) {
4039 			chan = channel - 4;
4040 		} else {
4041 			chan = channel;
4042 		}
4043 
4044 		for (i = 0; i < 7; i++) {
4045 			if (iwk_eep_band_6[i] == chan) {
4046 				return (&sc->sc_eep_map.band_24_channels[i]);
4047 			}
4048 		}
4049 		for (i = 0; i < 11; i++) {
4050 			if (iwk_eep_band_7[i] == chan) {
4051 				return (&sc->sc_eep_map.band_52_channels[i]);
4052 			}
4053 		}
4054 	} else if (is_24G) {  /* 2.4 GHz band */
4055 		for (i = 0; i < 14; i++) {
4056 			if (iwk_eep_band_1[i] == channel) {
4057 				return (&sc->sc_eep_map.band_1_channels[i]);
4058 			}
4059 		}
4060 	} else {  /* 5 GHz band */
4061 		for (i = 0; i < 13; i++) {
4062 			if (iwk_eep_band_2[i] == channel) {
4063 				return (&sc->sc_eep_map.band_2_channels[i]);
4064 			}
4065 		}
4066 		for (i = 0; i < 12; i++) {
4067 			if (iwk_eep_band_3[i] == channel) {
4068 				return (&sc->sc_eep_map.band_3_channels[i]);
4069 			}
4070 		}
4071 		for (i = 0; i < 11; i++) {
4072 			if (iwk_eep_band_4[i] == channel) {
4073 				return (&sc->sc_eep_map.band_4_channels[i]);
4074 			}
4075 		}
4076 		for (i = 0; i < 6; i++) {
4077 			if (iwk_eep_band_5[i] == channel) {
4078 				return (&sc->sc_eep_map.band_5_channels[i]);
4079 			}
4080 		}
4081 	}
4082 
4083 	return (NULL);
4084 }
4085 
4086 /*
4087  * Determine which subband a given channel belongs
4088  * to in 2.4 GHz or 5 GHz band
4089  */
4090 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4091 {
4092 	int32_t b_n = -1;
4093 
4094 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4095 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4096 			continue;
4097 		}
4098 
4099 		if ((channel >=
4100 		    (uint16_t)sc->sc_eep_map.calib_info.
4101 		    band_info_tbl[b_n].ch_from) &&
4102 		    (channel <=
4103 		    (uint16_t)sc->sc_eep_map.calib_info.
4104 		    band_info_tbl[b_n].ch_to)) {
4105 			break;
4106 		}
4107 	}
4108 
4109 	return (b_n);
4110 }
4111 
4112 /* Rounded signed division used by the interpolation operation */
4113 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4114 {
4115 	int32_t sign = 1;
4116 
4117 	if (num < 0) {
4118 		sign = -sign;
4119 		num = -num;
4120 	}
4121 
4122 	if (denom < 0) {
4123 		sign = -sign;
4124 		denom = -denom;
4125 	}
4126 
4127 	*res = ((num*2 + denom) / (denom*2)) * sign;
4128 
4129 	return (IWK_SUCCESS);
4130 }
4131 
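/*
 * Illustrative behaviour of iwk_division() (not driver code): it is a
 * divide that rounds the magnitude to the nearest integer and then
 * re-applies the sign, i.e. roughly res = sign * round(|num| / |denom|).
 * For example:
 *
 *	iwk_division(7, 4, &res);	res == 2   (7/4 = 1.75 rounds up)
 *	iwk_division(5, 4, &res);	res == 1   (5/4 = 1.25 rounds down)
 *	iwk_division(-7, 4, &res);	res == -2  (sign re-applied)
 *
 * The rounding comes from the (num*2 + denom) / (denom*2) form above.
 */
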
4132 /* Linear interpolation between two calibration points */
4133 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4134     int32_t x2, int32_t y2)
4135 {
4136 	int32_t val;
4137 
4138 	if (x2 == x1) {
4139 		return (y1);
4140 	} else {
4141 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4142 		return (val + y2);
4143 	}
4144 }
4145 
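/*
 * Illustrative interpolation example (made-up numbers): the function above
 * evaluates the line through (x1, y1) and (x2, y2) at x, using
 * iwk_division() for the rounded divide:
 *
 *	y = y2 + round((x2 - x) * (y1 - y2) / (x2 - x1))
 *
 * e.g. iwk_interpolate_value(40, 36, 10, 48, 16) computes
 * (48 - 40) * (10 - 16) / (48 - 36) = -4, so the result is 16 - 4 = 12,
 * which matches the straight line between the two calibration points.
 * When x1 == x2 the first point's value is returned unchanged.
 */
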
4146 /* Get interpolated measurement data of a given channel for all chains. */
4147 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4148     struct iwk_eep_calib_channel_info *chan_info)
4149 {
4150 	int32_t ban_n;
4151 	uint32_t ch1_n, ch2_n;
4152 	int32_t c, m;
4153 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4154 
4155 	/* determine subband number */
4156 	ban_n = iwk_band_number(sc, channel);
4157 	if (ban_n >= EEP_TX_POWER_BANDS) {
4158 		return (DDI_FAILURE);
4159 	}
4160 
4161 	ch1_n =
4162 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4163 	ch2_n =
4164 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4165 
4166 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4167 
4168 	/*
4169 	 * go through all chains on chipset
4170 	 */
4171 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4172 		/*
4173 		 * go through all factory measurements
4174 		 */
4175 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4176 			m1_p =
4177 			    &(sc->sc_eep_map.calib_info.
4178 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4179 			m2_p =
4180 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4181 			    ch2.measure[c][m]);
4182 			m_p = &(chan_info->measure[c][m]);
4183 
4184 			/*
4185 			 * make interpolation to get actual
4186 			 * Tx power for given channel
4187 			 */
4188 			m_p->actual_pow = iwk_interpolate_value(channel,
4189 			    ch1_n, m1_p->actual_pow,
4190 			    ch2_n, m2_p->actual_pow);
4191 
4192 			/* make interpolation to get index into gain table */
4193 			m_p->gain_idx = iwk_interpolate_value(channel,
4194 			    ch1_n, m1_p->gain_idx,
4195 			    ch2_n, m2_p->gain_idx);
4196 
4197 			/* make interpolation to get chipset temperature */
4198 			m_p->temperature = iwk_interpolate_value(channel,
4199 			    ch1_n, m1_p->temperature,
4200 			    ch2_n, m2_p->temperature);
4201 
4202 			/*
4203 			 * make interpolation to get power
4204 			 * amp detector level
4205 			 */
4206 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4207 			    m1_p->pa_det,
4208 			    ch2_n, m2_p->pa_det);
4209 		}
4210 	}
4211 
4212 	return (IWK_SUCCESS);
4213 }
4214 
4215 /*
4216  * Calculate voltage compensation for Tx power. For more information,
4217  * please refer to the iwk_calibration.h file
4218  */
4219 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4220     int32_t curr_voltage)
4221 {
4222 	int32_t vol_comp = 0;
4223 
4224 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4225 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4226 		return (vol_comp);
4227 	}
4228 
4229 	(void) iwk_division(curr_voltage-eep_voltage,
4230 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4231 
4232 	if (curr_voltage > eep_voltage) {
4233 		vol_comp *= 2;
4234 	}
4235 	if ((vol_comp < -2) || (vol_comp > 2)) {
4236 		vol_comp = 0;
4237 	}
4238 
4239 	return (vol_comp);
4240 }
4241 
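/*
 * Illustrative voltage compensation example (made-up numbers): the value
 * returned above is the voltage delta expressed in units of
 * TX_POWER_IWK_VOLTAGE_CODES_PER_03V, doubled when the current voltage
 * exceeds the EEPROM value, and discarded (forced to 0) when it falls
 * outside [-2, 2].  For instance, if the delta rounds to 1 and the current
 * voltage is higher than the EEPROM reading, the compensation becomes 2;
 * a delta that rounds to 3 would be thrown away entirely.  Either voltage
 * equal to TX_POWER_IWK_ILLEGAL_VOLTAGE short-circuits the result to 0.
 */
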
4242 /*
4243  * Thermal compensation values for txpower for various frequency ranges ...
4244  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4245  */
4246 static struct iwk_txpower_tempera_comp {
4247 	int32_t degrees_per_05db_a;
4248 	int32_t degrees_per_05db_a_denom;
4249 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4250 	{9, 2},			/* group 0 5.2, ch  34-43 */
4251 	{4, 1},			/* group 1 5.2, ch  44-70 */
4252 	{4, 1},			/* group 2 5.2, ch  71-124 */
4253 	{4, 1},			/* group 3 5.2, ch 125-200 */
4254 	{3, 1}			/* group 4 2.4, ch   all */
4255 };
4256 
4257 /*
4258  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4259  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4260  */
4261 static int32_t back_off_table[] = {
4262 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4263 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4264 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4265 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4266 	10			/* CCK */
4267 };
4268 
4269 /* determine minimum Tx power index in gain table */
4270 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4271 {
4272 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4273 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4274 	}
4275 
4276 	return (MIN_TX_GAIN_INDEX);
4277 }
4278 
4279 /*
4280  * Determine DSP and radio gain according to temperature and other factors.
4281  * This function does the bulk of the Tx power calibration.
4282  */
4283 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4284     struct iwk_tx_power_db *tp_db)
4285 {
4286 	int is_24G, is_fat, is_high_chan = 0, is_mimo;
4287 	int c, r;
4288 	int32_t target_power;
4289 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4290 	uint16_t channel;
4291 	uint8_t saturation_power;
4292 	int32_t regu_power;
4293 	int32_t curr_regu_power;
4294 	struct iwk_eep_channel *eep_chan_p;
4295 	struct iwk_eep_calib_channel_info eep_chan_calib;
4296 	int32_t eep_voltage, init_voltage;
4297 	int32_t voltage_compensation;
4298 	int32_t temperature;
4299 	int32_t degrees_per_05db_num;
4300 	int32_t degrees_per_05db_denom;
4301 	struct iwk_eep_calib_measure *measure_p;
4302 	int32_t interpo_temp;
4303 	int32_t power_limit;
4304 	int32_t atten_value;
4305 	int32_t tempera_comp[2];
4306 	int32_t interpo_gain_idx[2];
4307 	int32_t interpo_actual_pow[2];
4308 	union iwk_tx_power_dual_stream txpower_gains;
4309 	int32_t txpower_gains_idx;
4310 
4311 	channel = sc->sc_config.chan;
4312 
4313 	/* 2.4 GHz or 5 GHz band */
4314 	is_24G = iwk_is_24G_band(sc);
4315 
4316 	/* fat channel or not */
4317 	is_fat = iwk_is_fat_channel(sc);
4318 
4319 	/*
4320 	 * determine whether the fat channel is identified by the low half
4321 	 * or the high half channel number
4322 	 */
4323 	if (is_fat && (sc->sc_config.flags &
4324 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4325 		is_high_chan = 1;
4326 	}
4327 
4328 	if ((channel > 0) && (channel < 200)) {
4329 		/* get regulatory channel data from eeprom */
4330 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4331 		    is_fat, is_high_chan);
4332 		if (NULL == eep_chan_p) {
4333 			cmn_err(CE_WARN,
4334 			    "iwk_txpower_table_cmd_init(): "
4335 			    "can't get channel information\n");
4336 			return (DDI_FAILURE);
4337 		}
4338 	} else {
4339 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4340 		    "channel (%d) isn't in the proper range\n",
4341 		    channel);
4342 		return (DDI_FAILURE);
4343 	}
4344 
4345 	/* initial value of Tx power */
4346 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4347 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4348 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4349 		    "user TX power is too weak\n");
4350 		return (DDI_FAILURE);
4351 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4352 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4353 		    "user TX power is too strong\n");
4354 		return (DDI_FAILURE);
4355 	}
4356 
4357 	target_power = 2 * sc->sc_user_txpower;
4358 
4359 	/* determine which group current channel belongs to */
4360 	tx_grp = iwk_txpower_grp(channel);
4361 	if (tx_grp < 0) {
4362 		return (tx_grp);
4363 	}
4364 
4365 
4366 	if (is_fat) {
4367 		if (is_high_chan) {
4368 			channel -= 2;
4369 		} else {
4370 			channel += 2;
4371 		}
4372 	}
4373 
4374 	/* determine saturation power */
4375 	if (is_24G) {
4376 		saturation_power =
4377 		    sc->sc_eep_map.calib_info.saturation_power24;
4378 	} else {
4379 		saturation_power =
4380 		    sc->sc_eep_map.calib_info.saturation_power52;
4381 	}
4382 
4383 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4384 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4385 		if (is_24G) {
4386 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4387 		} else {
4388 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4389 		}
4390 	}
4391 
4392 	/* determine regulatory power */
4393 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4394 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4395 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4396 		if (is_24G) {
4397 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4398 		} else {
4399 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4400 		}
4401 	}
4402 
4403 	/*
4404 	 * get measurement data for the current channel,
4405 	 * such as temperature, index into the gain table, and actual Tx power
4406 	 */
4407 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4408 
4409 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4410 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4411 
4412 	/* calculate voltage compensation to Tx power */
4413 	voltage_compensation =
4414 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4415 
4416 	if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4417 		temperature = sc->sc_tempera;
4418 	} else {
4419 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4420 	}
4421 	if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4422 		temperature = sc->sc_tempera;
4423 	} else {
4424 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4425 	}
4426 	temperature = KELVIN_TO_CELSIUS(temperature);
4427 
4428 	degrees_per_05db_num =
4429 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4430 	degrees_per_05db_denom =
4431 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4432 
4433 	for (c = 0; c < 2; c++) {  /* go through all chains */
4434 		measure_p = &eep_chan_calib.measure[c][1];
4435 		interpo_temp = measure_p->temperature;
4436 
4437 		/* determine temperature compensation to Tx power */
4438 		(void) iwk_division(
4439 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4440 		    degrees_per_05db_num, &tempera_comp[c]);
4441 
4442 		interpo_gain_idx[c] = measure_p->gain_idx;
4443 		interpo_actual_pow[c] = measure_p->actual_pow;
4444 	}
4445 
4446 	/*
4447 	 * go through all rate entries in Tx power table
4448 	 */
4449 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4450 		if (r & 0x8) {
4451 			/* need to lower regulatory power for MIMO mode */
4452 			curr_regu_power = regu_power -
4453 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4454 			is_mimo = 1;
4455 		} else {
4456 			curr_regu_power = regu_power;
4457 			is_mimo = 0;
4458 		}
4459 
4460 		power_limit = saturation_power - back_off_table[r];
4461 		if (power_limit > curr_regu_power) {
4462 			/* final Tx power limit */
4463 			power_limit = curr_regu_power;
4464 		}
4465 
4466 		if (target_power > power_limit) {
4467 			target_power = power_limit; /* final target Tx power */
4468 		}
4469 
4470 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4471 			if (is_mimo) {
4472 				atten_value =
4473 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4474 			} else {
4475 				atten_value = 0;
4476 			}
4477 
4478 			/*
4479 			 * calculate the index into the gain table;
4480 			 * this step is very important
4481 			 */
4482 			txpower_gains_idx = interpo_gain_idx[c] -
4483 			    (target_power - interpo_actual_pow[c]) -
4484 			    tempera_comp[c] - voltage_compensation +
4485 			    atten_value;
4486 
4487 			if (txpower_gains_idx <
4488 			    iwk_min_power_index(r, is_24G)) {
4489 				txpower_gains_idx =
4490 				    iwk_min_power_index(r, is_24G);
4491 			}
4492 
4493 			if (!is_24G) {
4494 				/*
4495 				 * support negative index for 5 GHz
4496 				 * band
4497 				 */
4498 				txpower_gains_idx += 9;
4499 			}
4500 
4501 			if (POWER_TABLE_CCK_ENTRY == r) {
4502 				/* for CCK mode, make the necessary attenuation */
4503 				txpower_gains_idx +=
4504 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4505 			}
4506 
4507 			if (txpower_gains_idx > 107) {
4508 				txpower_gains_idx = 107;
4509 			} else if (txpower_gains_idx < 0) {
4510 				txpower_gains_idx = 0;
4511 			}
4512 
4513 			/* search DSP and radio gains in gain table */
4514 			txpower_gains.s.radio_tx_gain[c] =
4515 			    gains_table[is_24G][txpower_gains_idx].radio;
4516 			txpower_gains.s.dsp_predis_atten[c] =
4517 			    gains_table[is_24G][txpower_gains_idx].dsp;
4518 
4519 			IWK_DBG((IWK_DEBUG_CALIBRATION,
4520 			    "rate_index: %d, "
4521 			    "gain_index %d, c: %d,is_mimo: %d\n",
4522 			    r, txpower_gains_idx, c, is_mimo));
4523 		}
4524 
4525 		/* initialize Tx power table */
4526 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4527 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4528 		} else {
4529 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
4530 		}
4531 	}
4532 
4533 	return (IWK_SUCCESS);
4534 }
4535 
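/*
 * Illustrative summary of the gain-index arithmetic above (made-up
 * numbers): for each rate entry r and chain c the driver computes
 *
 *	idx = interpo_gain_idx[c]
 *	    - (target_power - interpo_actual_pow[c])
 *	    - tempera_comp[c] - voltage_compensation + atten_value;
 *
 * then floors it at iwk_min_power_index(r, is_24G), adds 9 on the 5 GHz
 * band, adds IWK_TX_POWER_CCK_COMPENSATION_C_STEP for the CCK entry, and
 * finally clamps it to [0, 107] before looking up the DSP and radio gains
 * in gains_table[].  E.g. an interpolated index of 40 with a 4 half-dB
 * power shortfall and a temperature compensation of 2 would land on
 * 40 - 4 - 2 = 34 (before the band/CCK adjustments).
 */
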
4536 /*
4537  * Perform Tx power calibration to adjust Tx power.
4538  * This is completed by sending out the Tx power table command.
4539  */
4540 static int iwk_tx_power_calibration(iwk_sc_t *sc)
4541 {
4542 	iwk_tx_power_table_cmd_t cmd;
4543 	int rv;
4544 
4545 	if (sc->sc_flags & IWK_F_SCANNING) {
4546 		return (IWK_SUCCESS);
4547 	}
4548 
4549 	/* necessary initialization to Tx power table command */
4550 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
4551 	cmd.channel = sc->sc_config.chan;
4552 	cmd.channel_normal_width = 0;
4553 
4554 	/* initialize Tx power table */
4555 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
4556 	if (rv) {
4557 		cmn_err(CE_NOTE, "iwk_tx_power_calibration(): rv = %d\n", rv);
4558 		return (rv);
4559 	}
4560 
4561 	/* send out Tx power table command */
4562 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
4563 	if (rv) {
4564 		return (rv);
4565 	}
4566 
4567 	/* record current temperature */
4568 	sc->sc_last_tempera = sc->sc_tempera;
4569 
4570 	return (IWK_SUCCESS);
4571 }
4572 
4573 /* This function is the handler of statistics notification from uCode */
4574 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
4575 {
4576 	int is_diff;
4577 	struct iwk_notif_statistics *statistics_p =
4578 	    (struct iwk_notif_statistics *)(desc + 1);
4579 
4580 	mutex_enter(&sc->sc_glock);
4581 
4582 	is_diff = (sc->sc_statistics.general.temperature !=
4583 	    statistics_p->general.temperature) ||
4584 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
4585 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
4586 
4587 	/* update statistics data */
4588 	(void) memcpy(&sc->sc_statistics, statistics_p,
4589 	    sizeof (struct iwk_notif_statistics));
4590 
4591 	sc->sc_flags |= IWK_F_STATISTICS;
4592 
4593 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
4594 		/* make Receiver gain balance calibration */
4595 		(void) iwk_rxgain_diff(sc);
4596 
4597 		/* make Receiver sensitivity calibration */
4598 		(void) iwk_rx_sens(sc);
4599 	}
4600 
4601 
4602 	if (!is_diff) {
4603 		mutex_exit(&sc->sc_glock);
4604 		return;
4605 	}
4606 
4607 	/* calculate the current temperature of the 4965 chipset */
4608 	sc->sc_tempera = iwk_curr_tempera(sc);
4609 
4610 	/* distinct temperature change will trigger Tx power calibration */
4611 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
4612 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
4613 		/* make Tx power calibration */
4614 		(void) iwk_tx_power_calibration(sc);
4615 	}
4616 
4617 	mutex_exit(&sc->sc_glock);
4618 }
4619 
4620 /* Determine whether this station is in the associated state */
4621 static int iwk_is_associated(iwk_sc_t *sc)
4622 {
4623 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
4624 }
4625 
4626 /* Make necessary preparation for Receiver gain balance calibration */
4627 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
4628 {
4629 	int i, rv;
4630 	struct iwk_calibration_cmd cmd;
4631 	struct iwk_rx_gain_diff *gain_diff_p;
4632 
4633 	gain_diff_p = &sc->sc_rxgain_diff;
4634 
4635 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
4636 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
4637 
4638 	for (i = 0; i < RX_CHAINS_NUM; i++) {
4639 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
4640 	}
4641 
4642 	if (iwk_is_associated(sc)) {
4643 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4644 		cmd.diff_gain_a = 0;
4645 		cmd.diff_gain_b = 0;
4646 		cmd.diff_gain_c = 0;
4647 
4648 		/* assume the gains of all Rx chains are balanced */
4649 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
4650 		    sizeof (cmd), 1);
4651 		if (rv) {
4652 			return (rv);
4653 		}
4654 
4655 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
4656 	}
4657 
4658 	return (IWK_SUCCESS);
4659 }
4660 
4661 /*
4662  * Perform Receiver gain balance calibration to balance the Rx gain
4663  * between Rx chains and determine which chain is disconnected
4664  */
4665 static int iwk_rxgain_diff(iwk_sc_t *sc)
4666 {
4667 	int i, is_24G, rv;
4668 	int max_beacon_chain_n;
4669 	int min_noise_chain_n;
4670 	uint16_t channel_n;
4671 	int32_t beacon_diff;
4672 	int32_t noise_diff;
4673 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
4674 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
4675 	struct iwk_calibration_cmd cmd;
4676 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4677 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4678 	struct statistics_rx_non_phy *rx_general_p =
4679 	    &sc->sc_statistics.rx.general;
4680 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
4681 
4682 	if (INTERFERENCE_DATA_AVAILABLE !=
4683 	    rx_general_p->interference_data_flag) {
4684 		return (IWK_SUCCESS);
4685 	}
4686 
4687 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
4688 		return (IWK_SUCCESS);
4689 	}
4690 
4691 	is_24G = iwk_is_24G_band(sc);
4692 	channel_n = sc->sc_config.chan;	 /* channel number */
4693 
4694 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
4695 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
4696 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
4697 	    !is_24G)) {
4698 		return (IWK_SUCCESS);
4699 	}
4700 
4701 	/* Rx chain's noise strength from statistics notification */
4702 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
4703 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
4704 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
4705 
4706 	/* Rx chain's beacon strength from statistics notification */
4707 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
4708 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
4709 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
4710 
4711 	gain_diff_p->beacon_count++;
4712 
4713 	/* accumulate chain's noise strength */
4714 	gain_diff_p->noise_stren_a += noise_chain_a;
4715 	gain_diff_p->noise_stren_b += noise_chain_b;
4716 	gain_diff_p->noise_stren_c += noise_chain_c;
4717 
4718 	/* accumulate chain's beacon strength */
4719 	gain_diff_p->beacon_stren_a += beacon_chain_a;
4720 	gain_diff_p->beacon_stren_b += beacon_chain_b;
4721 	gain_diff_p->beacon_stren_c += beacon_chain_c;
4722 
4723 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
4724 		/* calculate average beacon strength */
4725 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
4726 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
4727 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
4728 
4729 		/* calculate average noise strength */
4730 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
4731 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
4732 		noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
4733 
4734 		/* determine maximum beacon strength among 3 chains */
4735 		if ((beacon_aver[0] >= beacon_aver[1]) &&
4736 		    (beacon_aver[0] >= beacon_aver[2])) {
4737 			max_beacon_chain_n = 0;
4738 			gain_diff_p->connected_chains = 1 << 0;
4739 		} else if (beacon_aver[1] >= beacon_aver[2]) {
4740 			max_beacon_chain_n = 1;
4741 			gain_diff_p->connected_chains = 1 << 1;
4742 		} else {
4743 			max_beacon_chain_n = 2;
4744 			gain_diff_p->connected_chains = 1 << 2;
4745 		}
4746 
4747 		/* determine which chain is disconnected */
4748 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4749 			if (i != max_beacon_chain_n) {
4750 				beacon_diff = beacon_aver[max_beacon_chain_n] -
4751 				    beacon_aver[i];
4752 				if (beacon_diff > MAX_ALLOWED_DIFF) {
4753 					gain_diff_p->disconnect_chain[i] = 1;
4754 				} else {
4755 					gain_diff_p->connected_chains |=
4756 					    (1 << i);
4757 				}
4758 			}
4759 		}
4760 
4761 		/*
4762 		 * if chain A and B are both disconnected,
4763 		 * assume the one with the stronger beacon strength is connected
4764 		 */
4765 		if (gain_diff_p->disconnect_chain[0] &&
4766 		    gain_diff_p->disconnect_chain[1]) {
4767 			if (beacon_aver[0] >= beacon_aver[1]) {
4768 				gain_diff_p->disconnect_chain[0] = 0;
4769 				gain_diff_p->connected_chains |= (1 << 0);
4770 			} else {
4771 				gain_diff_p->disconnect_chain[1] = 0;
4772 				gain_diff_p->connected_chains |= (1 << 1);
4773 			}
4774 		}
4775 
4776 		/* determine minimum noise strength among 3 chains */
4777 		if (!gain_diff_p->disconnect_chain[0]) {
4778 			min_noise_chain_n = 0;
4779 
4780 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4781 				if (!gain_diff_p->disconnect_chain[i] &&
4782 				    (noise_aver[i] <=
4783 				    noise_aver[min_noise_chain_n])) {
4784 					min_noise_chain_n = i;
4785 				}
4786 
4787 			}
4788 		} else {
4789 			min_noise_chain_n = 1;
4790 
4791 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4792 				if (!gain_diff_p->disconnect_chain[i] &&
4793 				    (noise_aver[i] <=
4794 				    noise_aver[min_noise_chain_n])) {
4795 					min_noise_chain_n = i;
4796 				}
4797 			}
4798 		}
4799 
4800 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
4801 
4802 		/* determine gain difference between chains */
4803 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4804 			if (!gain_diff_p->disconnect_chain[i] &&
4805 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
4806 			    gain_diff_p->gain_diff_chain[i])) {
4807 
4808 				noise_diff = noise_aver[i] -
4809 				    noise_aver[min_noise_chain_n];
4810 				gain_diff_p->gain_diff_chain[i] =
4811 				    (uint8_t)((noise_diff * 10) / 15);
4812 
4813 				if (gain_diff_p->gain_diff_chain[i] > 3) {
4814 					gain_diff_p->gain_diff_chain[i] = 3;
4815 				}
4816 
4817 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
4818 			} else {
4819 				gain_diff_p->gain_diff_chain[i] = 0;
4820 			}
4821 		}
4822 
4823 		if (!gain_diff_p->gain_diff_send) {
4824 			gain_diff_p->gain_diff_send = 1;
4825 
4826 			(void) memset(&cmd, 0, sizeof (cmd));
4827 
4828 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4829 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
4830 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
4831 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
4832 
4833 			/*
4834 			 * send out PHY calibration command to
4835 			 * adjust every chain's Rx gain
4836 			 */
4837 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
4838 			    &cmd, sizeof (cmd), 1);
4839 			if (rv) {
4840 				return (rv);
4841 			}
4842 
4843 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
4844 		}
4845 
4846 		gain_diff_p->beacon_stren_a = 0;
4847 		gain_diff_p->beacon_stren_b = 0;
4848 		gain_diff_p->beacon_stren_c = 0;
4849 
4850 		gain_diff_p->noise_stren_a = 0;
4851 		gain_diff_p->noise_stren_b = 0;
4852 		gain_diff_p->noise_stren_c = 0;
4853 	}
4854 
4855 	return (IWK_SUCCESS);
4856 }
4857 
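/*
 * Illustrative gain-difference example (made-up numbers): once 20 beacons
 * have been accumulated, the quietest connected chain becomes the
 * reference and every other connected chain gets
 *
 *	gain_diff = min(3, (noise_aver[i] - noise_aver[ref]) * 10 / 15)
 *
 * with bit 2 set to mark the value as a reduction.  For example, a chain
 * whose average silence level is 4 units above the reference gets
 * 4 * 10 / 15 = 2, encoded as (2 | (1 << 2)).  A chain whose average
 * beacon strength trails the strongest chain by more than MAX_ALLOWED_DIFF
 * is treated as disconnected and left at 0.
 */
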
4858 /* Make necessary preparation for Receiver sensitivity calibration */
4859 static int iwk_rx_sens_init(iwk_sc_t *sc)
4860 {
4861 	int i, rv;
4862 	struct iwk_rx_sensitivity_cmd cmd;
4863 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
4864 
4865 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
4866 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
4867 
4868 	rx_sens_p->auto_corr_ofdm_x4 = 90;
4869 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
4870 	rx_sens_p->auto_corr_ofdm_x1 = 105;
4871 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
4872 
4873 	rx_sens_p->auto_corr_cck_x4 = 125;
4874 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
4875 	rx_sens_p->min_energy_det_cck = 100;
4876 
4877 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
4878 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
4879 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
4880 
4881 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
4882 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
4883 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
4884 	rx_sens_p->last_false_alarm_cnt_cck = 0;
4885 
4886 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
4887 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
4888 	rx_sens_p->cck_no_false_alarm_num = 0;
4889 	rx_sens_p->cck_beacon_idx = 0;
4890 
4891 	for (i = 0; i < 10; i++) {
4892 		rx_sens_p->cck_beacon_min[i] = 0;
4893 	}
4894 
4895 	rx_sens_p->cck_noise_idx = 0;
4896 	rx_sens_p->cck_noise_ref = 0;
4897 
4898 	for (i = 0; i < 20; i++) {
4899 		rx_sens_p->cck_noise_max[i] = 0;
4900 	}
4901 
4902 	rx_sens_p->cck_noise_diff = 0;
4903 	rx_sens_p->cck_no_false_alarm_num = 0;
4904 
4905 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
4906 
4907 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
4908 	    rx_sens_p->auto_corr_ofdm_x4;
4909 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
4910 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
4911 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
4912 	    rx_sens_p->auto_corr_ofdm_x1;
4913 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
4914 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
4915 
4916 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
4917 	    rx_sens_p->auto_corr_cck_x4;
4918 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
4919 	    rx_sens_p->auto_corr_mrc_cck_x4;
4920 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
4921 
4922 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
4923 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
4924 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
4925 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
4926 
4927 	/* at first, set up Rx to maximum sensitivity */
4928 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
4929 	if (rv) {
4930 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
4931 		    "in the process of initialization, "
4932 		    "failed to send rx sensitivity command\n");
4933 		return (rv);
4934 	}
4935 
4936 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
4937 
4938 	return (IWK_SUCCESS);
4939 }
4940 
4941 /*
4942  * Perform Receiver sensitivity calibration to adjust every chain's Rx
4943  * sensitivity. For more information, please refer to the iwk_calibration.h file
4944  */
4945 static int iwk_rx_sens(iwk_sc_t *sc)
4946 {
4947 	int rv;
4948 	uint32_t actual_rx_time;
4949 	struct statistics_rx_non_phy *rx_general_p =
4950 	    &sc->sc_statistics.rx.general;
4951 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
4952 	struct iwk_rx_sensitivity_cmd cmd;
4953 
4954 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
4955 		cmn_err(CE_WARN, "iwk_rx_sens(): "
4956 		    "sensitivity initialization has not finished.\n");
4957 		return (DDI_FAILURE);
4958 	}
4959 
4960 	if (INTERFERENCE_DATA_AVAILABLE !=
4961 	    rx_general_p->interference_data_flag) {
4962 		cmn_err(CE_WARN, "iwk_rx_sens(): "
4963 		    "can't make rx sensitivity calibration, "
4964 		    "because of invalid statistics\n");
4965 		return (DDI_FAILURE);
4966 	}
4967 
4968 	actual_rx_time = rx_general_p->channel_load;
4969 	if (!actual_rx_time) {
4970 		cmn_err(CE_WARN, "iwk_rx_sens(): "
4971 		    "can't make rx sensitivity calibration, "
4972 		    "because there is not enough rx time\n");
4973 		return (DDI_FAILURE);
4974 	}
4975 
4976 	/* make Rx sensitivity calibration for OFDM mode */
4977 	rv = iwk_ofdm_sens(sc, actual_rx_time);
4978 	if (rv) {
4979 		return (rv);
4980 	}
4981 
4982 	/* make Rx sensitivity calibration for CCK mode */
4983 	rv = iwk_cck_sens(sc, actual_rx_time);
4984 	if (rv) {
4985 		return (rv);
4986 	}
4987 
4988 	/*
4989 	 * if the false-alarm sums have not changed, there is nothing to do
4990 	 */
4991 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
4992 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
4993 		return (IWK_SUCCESS);
4994 	}
4995 
4996 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
4997 
4998 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
4999 	    rx_sens_p->auto_corr_ofdm_x4;
5000 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5001 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5002 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5003 	    rx_sens_p->auto_corr_ofdm_x1;
5004 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5005 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5006 
5007 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5008 	    rx_sens_p->auto_corr_cck_x4;
5009 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5010 	    rx_sens_p->auto_corr_mrc_cck_x4;
5011 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5012 	    rx_sens_p->min_energy_det_cck;
5013 
5014 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5015 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5016 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5017 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5018 
5019 	/*
5020 	 * send sensitivity command to complete actual sensitivity calibration
5021 	 */
5022 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5023 	if (rv) {
5024 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5025 		    "failed to send rx sensitivity command\n");
5026 		return (rv);
5027 	}
5028 
5029 	return (IWK_SUCCESS);
5030 
5031 }
5032 
5033 /*
5034  * Perform Rx sensitivity calibration for CCK mode.
5035  * This prepares the parameters for the Sensitivity command
5036  */
5037 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5038 {
5039 	int i;
5040 	uint8_t noise_a, noise_b, noise_c;
5041 	uint8_t max_noise_abc, max_noise_20;
5042 	uint32_t beacon_a, beacon_b, beacon_c;
5043 	uint32_t min_beacon_abc, max_beacon_10;
5044 	uint32_t cck_fa, cck_bp;
5045 	uint32_t cck_sum_fa_bp;
5046 	uint32_t temp;
5047 	struct statistics_rx_non_phy *rx_general_p =
5048 	    &sc->sc_statistics.rx.general;
5049 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5050 
5051 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5052 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5053 
5054 	/* accumulate false alarm */
5055 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5056 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5057 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5058 		cck_fa += (0xFFFFFFFF - temp);
5059 	} else {
5060 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5061 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5062 	}
5063 
5064 	/* accumulate bad plcp */
5065 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5066 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5067 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5068 		cck_bp += (0xFFFFFFFF - temp);
5069 	} else {
5070 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5071 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5072 	}
5073 
5074 	/*
5075 	 * calculate relative value
5076 	 */
5077 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5078 	rx_sens_p->cck_noise_diff = 0;
5079 
5080 	noise_a =
5081 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5082 	noise_b =
5083 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5084 	noise_c =
5085 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5086 
5087 	beacon_a = rx_general_p->beacon_energy_a;
5088 	beacon_b = rx_general_p->beacon_energy_b;
5089 	beacon_c = rx_general_p->beacon_energy_c;
5090 
5091 	/* determine maximum noise among 3 chains */
5092 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5093 		max_noise_abc = noise_a;
5094 	} else if (noise_b >= noise_c) {
5095 		max_noise_abc = noise_b;
5096 	} else {
5097 		max_noise_abc = noise_c;
5098 	}
5099 
5100 	/* record maximum noise among 3 chains */
5101 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5102 	rx_sens_p->cck_noise_idx++;
5103 	if (rx_sens_p->cck_noise_idx >= 20) {
5104 		rx_sens_p->cck_noise_idx = 0;
5105 	}
5106 
5107 	/* determine the maximum among the last 20 recorded noise maxima */
5108 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5109 	for (i = 0; i < 20; i++) {
5110 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5111 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5112 		}
5113 	}
5114 
5115 	/* determine minimum beacon among 3 chains */
5116 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5117 		min_beacon_abc = beacon_a;
5118 	} else if (beacon_b <= beacon_c) {
5119 		min_beacon_abc = beacon_b;
5120 	} else {
5121 		min_beacon_abc = beacon_c;
5122 	}
5123 
5124 	/* record minimum beacon among 3 chains */
5125 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5126 	rx_sens_p->cck_beacon_idx++;
5127 	if (rx_sens_p->cck_beacon_idx >= 10) {
5128 		rx_sens_p->cck_beacon_idx = 0;
5129 	}
5130 
5131 	/* determine the maximum among the last 10 recorded minimum beacons */
5132 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5133 	for (i = 0; i < 10; i++) {
5134 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5135 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5136 		}
5137 	}
5138 
5139 	/* add a little margin */
5140 	max_beacon_10 += 6;
5141 
5142 	/* record the number of consecutive periods with few false alarms */
5143 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5144 		rx_sens_p->cck_no_false_alarm_num++;
5145 	} else {
5146 		rx_sens_p->cck_no_false_alarm_num = 0;
5147 	}
5148 
5149 	/*
5150 	 * adjust parameters in the sensitivity command
5151 	 * according to the current status.
5152 	 * For more information, please refer to the iwk_calibration.h file
5153 	 */
5154 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5155 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5156 
5157 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5158 			rx_sens_p->cck_noise_ref = max_noise_20;
5159 
5160 			if (rx_sens_p->min_energy_det_cck > 2) {
5161 				rx_sens_p->min_energy_det_cck -= 2;
5162 			}
5163 		}
5164 
5165 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5166 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5167 		} else {
5168 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5169 				rx_sens_p->auto_corr_cck_x4 += 3;
5170 			} else {
5171 				rx_sens_p->auto_corr_cck_x4 = 200;
5172 			}
5173 		}
5174 
5175 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5176 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5177 		} else {
5178 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5179 		}
5180 
5181 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5182 
5183 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5184 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5185 
5186 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5187 		    (int32_t)max_noise_20;
5188 
5189 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5190 		    ((rx_sens_p->cck_noise_diff > 2) ||
5191 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5192 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5193 				rx_sens_p->min_energy_det_cck += 2;
5194 			} else {
5195 				rx_sens_p->min_energy_det_cck = 97;
5196 			}
5197 
5198 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5199 				rx_sens_p->auto_corr_cck_x4 -= 3;
5200 			} else {
5201 				rx_sens_p->auto_corr_cck_x4 = 125;
5202 			}
5203 
5204 			if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) {
5205 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5206 			} else {
5207 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5208 			}
5209 
5210 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5211 		} else {
5212 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5213 		}
5214 	} else {
5215 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5216 
5217 		rx_sens_p->cck_noise_ref = max_noise_20;
5218 
5219 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5220 			rx_sens_p->min_energy_det_cck -= 8;
5221 		}
5222 
5223 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5224 	}
5225 
5226 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5227 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5228 	}
5229 
5230 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5231 
5232 	return (IWK_SUCCESS);
5233 }
5234 
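/*
 * Illustrative CCK false-alarm thresholds (made-up numbers): the
 * accumulated count is normalized as (false_alarms + bad_plcp) * 200 * 1024
 * and compared against the elapsed receive time:
 *
 *	> 50 * actual_rx_time	too many false alarms: raise the
 *				auto-correlation thresholds (less sensitive)
 *	< 5 * actual_rx_time	too few false alarms: lower them again
 *				(more sensitive), within the fixed bounds
 *	otherwise		good range: keep the current values
 *
 * e.g. with actual_rx_time = 100000 the "too many" boundary would be
 * 5000000 in the same normalized units.  min_energy_det_cck is also never
 * allowed to drop below the recent maximum beacon energy plus margin.
 */
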
5235 /*
5236  * Perform Rx sensitivity calibration for OFDM mode.
5237  * This prepares the parameters for the Sensitivity command
5238  */
5239 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5240 {
5241 	uint32_t temp;
5242 	uint16_t temp1;
5243 	uint32_t ofdm_fa, ofdm_bp;
5244 	uint32_t ofdm_sum_fa_bp;
5245 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5246 
5247 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5248 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5249 
5250 	/* accumulate false alarm */
5251 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5252 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5253 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5254 		ofdm_fa += (0xFFFFFFFF - temp);
5255 	} else {
5256 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5257 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5258 	}
5259 
5260 	/* accumulate bad plcp */
5261 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5262 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5263 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5264 		ofdm_bp += (0xFFFFFFFF - temp);
5265 	} else {
5266 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5267 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5268 	}
5269 
5270 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5271 
5272 	/*
5273 	 * adjust parameters in the sensitivity command according to current status
5274 	 */
5275 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5276 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5277 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5278 
5279 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5280 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5281 		    (temp1 <= 210) ? temp1 : 210;
5282 
5283 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5284 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5285 
5286 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5287 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5288 		    (temp1 <= 270) ? temp1 : 270;
5289 
5290 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5291 
5292 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5293 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5294 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5295 
5296 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5297 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5298 		    (temp1 >= 170) ? temp1 : 170;
5299 
5300 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5301 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5302 
5303 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5304 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5305 		    (temp1 >= 220) ? temp1 : 220;
5306 
5307 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5308 
5309 	} else {
5310 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5311 	}
5312 
5313 	return (IWK_SUCCESS);
5314 }
5315 
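/*
 * Illustrative OFDM adjustment pattern (not driver code): each threshold
 * is nudged by one step per statistics interval and then clamped to its
 * fixed range, which is equivalent to a small clamp helper such as
 *
 *	static uint16_t
 *	clamp_u16(int v, int lo, int hi)	\/\* hypothetical helper \*\/
 *	{
 *		return ((v < lo) ? lo : ((v > hi) ? hi : v));
 *	}
 *
 *	auto_corr_ofdm_x4 = clamp_u16(auto_corr_ofdm_x4 + 1, 85, 120);
 *
 * with +1 used when false alarms are too frequent and -1 when they are
 * too rare; the ranges above are 85..120, 170..210, 105..140 and 220..270
 * for the four OFDM thresholds.
 */
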
5316 /*
5317  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5318  *     a 256-entry history of uCode execution within a circular buffer.
5319  *     Its header format is:
5320  *
5321  *	uint32_t log_size;	log capacity (in number of entries)
5322  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5323  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5324  *      uint32_t write_index;	next circular buffer entry that uCode would fill
5325  *
5326  *     The header is followed by the circular buffer of log entries.  Entries
5327  *     with timestamps have the following format:
5328  *
5329  *	uint32_t event_id;     range 0 - 1500
5330  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5331  *	uint32_t data;         event_id-specific data value
5332  *
5333  *     Entries without timestamps contain only event_id and data.
5334  */
5335 
5336 /*
5337  * iwk_write_event_log - Write event log to dmesg
5338  */
5339 static void iwk_write_event_log(iwk_sc_t *sc)
5340 {
5341 	uint32_t log_event_table_ptr;	/* Start address of event table */
5342 	uint32_t startptr;	/* Start address of log data */
5343 	uint32_t logptr;	/* address of log data entry */
5344 	uint32_t i, n, num_events;
5345 	uint32_t event_id, data1, data2; /* log data */
5346 
5347 	uint32_t log_size;   /* log capacity (in number of entries) */
5348 	uint32_t type;	/* (1)timestamp with each entry,(0) no timestamp */
5349 	uint32_t wraps;	/* # times uCode has wrapped to */
5350 			/* the top of circular buffer */
5351 	uint32_t idx; /* index of entry to be filled in next */
5352 
5353 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5354 	if (!(log_event_table_ptr)) {
5355 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5356 		return;
5357 	}
5358 
5359 	iwk_mac_access_enter(sc);
5360 
5361 	/* Read log header */
5362 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5363 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5364 	type = iwk_mem_read(sc, log_event_table_ptr);
5365 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5366 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5367 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5368 	idx = iwk_mem_read(sc, log_event_table_ptr);
5369 	startptr = log_event_table_ptr +
5370 	    sizeof (uint32_t); /* addr of start of log data */
5371 	if (!log_size && !wraps) {
5372 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5373 		iwk_mac_access_exit(sc);
5374 		return;
5375 	}
5376 
5377 	if (!wraps) {
5378 		num_events = idx;
5379 		logptr = startptr;
5380 	} else {
5381 		num_events = log_size - idx;
5382 		n = type ? 3 : 2;	/* words per log entry */
5383 		logptr = startptr + (idx * n * sizeof (uint32_t));
5384 	}
5385 
5386 	for (i = 0; i < num_events; i++) {
5387 		event_id = iwk_mem_read(sc, logptr);
5388 		logptr += sizeof (uint32_t);
5389 		data1 = iwk_mem_read(sc, logptr);
5390 		logptr += sizeof (uint32_t);
5391 		if (type == 0) { /* no timestamp */
5392 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x",
5393 			    event_id, data1));
5394 		} else { /* timestamp */
5395 			data2 = iwk_mem_read(sc, logptr);
5396 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5397 			    data1, event_id, data2);
5398 			IWK_DBG((IWK_DEBUG_EEPROM,
5399 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5400 			    data1, event_id, data2));
5401 			logptr += sizeof (uint32_t);
5402 		}
5403 	}
5404 
5405 	/*
5406 	 * Print the wrapped around entries, if any
5407 	 */
5408 	if (wraps) {
5409 		logptr = startptr;
5410 		for (i = 0; i < idx; i++) {
5411 			event_id = iwk_mem_read(sc, logptr);
5412 			logptr += sizeof (uint32_t);
5413 			data1 = iwk_mem_read(sc, logptr);
5414 			logptr += sizeof (uint32_t);
5415 			if (type == 0) { /* no timestamp */
5416 				IWK_DBG((IWK_DEBUG_EEPROM,
5417 				    "Event ID=%d, Data=0x%x", event_id, data1));
5418 			} else { /* timestamp */
5419 				data2 = iwk_mem_read(sc, logptr);
5420 				IWK_DBG((IWK_DEBUG_EEPROM,
5421 				    "Time = %d, Event ID=%d, Data=0x%x\n",
5422 				    data1, event_id, data2));
5423 				logptr += sizeof (uint32_t);
5424 			}
5425 		}
5426 	}
5427 
5428 	iwk_mac_access_exit(sc);
5429 }
5430 
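/*
 * Illustrative event-log layout (derived from the header comment above,
 * not driver code): the in-chip table is read as
 *
 *	[log_size][type][wraps][write_index]		4-word header
 *	[entry 0][entry 1] ... [entry log_size-1]	circular data
 *
 * where each entry is 3 words (event_id, timestamp, data) when type != 0
 * and 2 words otherwise.  When the log has wrapped, the oldest entries
 * start at write_index, so iwk_write_event_log() prints from write_index
 * to the end of the buffer first and then the 0 .. write_index-1 portion.
 */
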
5431 /*
5432  * error_event_table_ptr indicates base of the error log.  This contains
5433  * information about any uCode error that occurs.  For 4965, the format is:
5434  *
5435  * uint32_t valid;        (nonzero) valid, (0) log is empty
5436  * uint32_t error_id;     type of error
5437  * uint32_t pc;           program counter
5438  * uint32_t blink1;       branch link
5439  * uint32_t blink2;       branch link
5440  * uint32_t ilink1;       interrupt link
5441  * uint32_t ilink2;       interrupt link
5442  * uint32_t data1;        error-specific data
5443  * uint32_t data2;        error-specific data
5444  * uint32_t line;         source code line of error
5445  * uint32_t bcon_time;    beacon timer
5446  * uint32_t tsf_low;      network timestamp function timer
5447  * uint32_t tsf_hi;       network timestamp function timer
5448  */
5449 /*
5450  * iwk_write_error_log - Write error log to dmesg
5451  */
5452 static void iwk_write_error_log(iwk_sc_t *sc)
5453 {
5454 	uint32_t err_ptr;	/* Start address of error log */
5455 	uint32_t valid;		/* is error log valid */
5456 
5457 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
5458 	if (!(err_ptr)) {
5459 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
5460 		return;
5461 	}
5462 
5463 	iwk_mac_access_enter(sc);
5464 
5465 	valid = iwk_mem_read(sc, err_ptr);
5466 	if (!(valid)) {
5467 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
5468 		iwk_mac_access_exit(sc);
5469 		return;
5470 	}
5471 	err_ptr += sizeof (uint32_t);
5472 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
5473 	err_ptr += sizeof (uint32_t);
5474 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
5475 	err_ptr += sizeof (uint32_t);
5476 	IWK_DBG((IWK_DEBUG_EEPROM,
5477 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5478 	err_ptr += sizeof (uint32_t);
5479 	IWK_DBG((IWK_DEBUG_EEPROM,
5480 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5481 	err_ptr += sizeof (uint32_t);
5482 	IWK_DBG((IWK_DEBUG_EEPROM,
5483 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5484 	err_ptr += sizeof (uint32_t);
5485 	IWK_DBG((IWK_DEBUG_EEPROM,
5486 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5487 	err_ptr += sizeof (uint32_t);
5488 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
5489 	err_ptr += sizeof (uint32_t);
5490 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
5491 	err_ptr += sizeof (uint32_t);
5492 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
5493 	err_ptr += sizeof (uint32_t);
5494 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
5495 	err_ptr += sizeof (uint32_t);
5496 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
5497 	err_ptr += sizeof (uint32_t);
5498 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
5499 
5500 	iwk_mac_access_exit(sc);
5501 }
5502