xref: /titanic_51/usr/src/uts/common/io/iwk/iwk2.c (revision 6cefaae1e90a413ba01560575bb3998e1a3df40e)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2007, Intel Corporation
8  * All rights reserved.
9  */
10 
11 /*
12  * Copyright (c) 2006
13  * Copyright (c) 2007
14  *	Damien Bergamini <damien.bergamini@free.fr>
15  *
16  * Permission to use, copy, modify, and distribute this software for any
17  * purpose with or without fee is hereby granted, provided that the above
18  * copyright notice and this permission notice appear in all copies.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27  */
28 
29 /*
30  * Driver for Intel PRO/Wireless 4965AGN (Kedron) 802.11 network adapters.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58 
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64 
65 #ifdef DEBUG
66 #define	IWK_DEBUG_80211		(1 << 0)
67 #define	IWK_DEBUG_CMD		(1 << 1)
68 #define	IWK_DEBUG_DMA		(1 << 2)
69 #define	IWK_DEBUG_EEPROM	(1 << 3)
70 #define	IWK_DEBUG_FW		(1 << 4)
71 #define	IWK_DEBUG_HW		(1 << 5)
72 #define	IWK_DEBUG_INTR		(1 << 6)
73 #define	IWK_DEBUG_MRR		(1 << 7)
74 #define	IWK_DEBUG_PIO		(1 << 8)
75 #define	IWK_DEBUG_RX		(1 << 9)
76 #define	IWK_DEBUG_SCAN		(1 << 10)
77 #define	IWK_DEBUG_TX		(1 << 11)
78 #define	IWK_DEBUG_RATECTL	(1 << 12)
79 #define	IWK_DEBUG_RADIO		(1 << 13)
80 #define	IWK_DEBUG_RESUME	(1 << 14)
81 #define	IWK_DEBUG_CALIBRATION	(1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define	IWK_DBG(x) \
84 	iwk_dbg x
85 #else
86 #define	IWK_DBG(x)
87 #endif
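
/*
 * A note on IWK_DBG(): callers pass a parenthesized argument list so
 * that the whole call compiles away when DEBUG is not defined, e.g.
 * (hypothetical call):
 *
 *	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d\n", qid));
 *
 * Output is produced only when the corresponding bit is set in
 * iwk_dbg_flags.
 */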
88 
89 static void	*iwk_soft_state_p = NULL;
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93 
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 	DMA_ATTR_V0,	/* version of this structure */
97 	0,		/* lowest usable address */
98 	0xffffffffU,	/* highest usable address */
99 	0xffffffffU,	/* maximum DMAable byte count */
100 	0x1000,		/* alignment in bytes */
101 	0x1000,		/* burst sizes (any?) */
102 	1,		/* minimum transfer */
103 	0xffffffffU,	/* maximum transfer */
104 	0xffffffffU,	/* maximum segment length */
105 	1,		/* maximum number of segments */
106 	1,		/* granularity */
107 	0,		/* flags (reserved) */
108 };
109 
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 	DMA_ATTR_V0,	/* version of this structure */
113 	0,		/* lowest usable address */
114 	0xffffffffU,	/* highest usable address */
115 	0xffffffffU,	/* maximum DMAable byte count */
116 	0x1000,		/* alignment in bytes */
117 	0x1000,		/* burst sizes (any?) */
118 	1,		/* minimum transfer */
119 	0xffffffffU,	/* maximum transfer */
120 	0xffffffffU,	/* maximum segment length */
121 	1,		/* maximum number of segments */
122 	1,		/* granularity */
123 	0,		/* flags (reserved) */
124 };
125 
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 	DMA_ATTR_V0,	/* version of this structure */
129 	0,		/* lowest usable address */
130 	0xffffffffU,	/* highest usable address */
131 	0xffffffffU,	/* maximum DMAable byte count */
132 	0x100,		/* alignment in bytes */
133 	0x100,		/* burst sizes (any?) */
134 	1,		/* minimum transfer */
135 	0xffffffffU,	/* maximum transfer */
136 	0xffffffffU,	/* maximum segment length */
137 	1,		/* maximum number of segments */
138 	1,		/* granularity */
139 	0,		/* flags (reserved) */
140 };
141 
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 	DMA_ATTR_V0,	/* version of this structure */
145 	0,		/* lowest usable address */
146 	0xffffffffU,	/* highest usable address */
147 	0xffffffffU,	/* maximum DMAable byte count */
148 	4,		/* alignment in bytes */
149 	0x100,		/* burst sizes (any?) */
150 	1,		/* minimum transfer */
151 	0xffffffffU,	/* maximum transfer */
152 	0xffffffffU,	/* maximum segment length */
153 	1,		/* maximum number of segments */
154 	1,		/* granularity */
155 	0,		/* flags (reserved) */
156 };
157 
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 	DMA_ATTR_V0,	/* version of this structure */
161 	0,		/* lowest usable address */
162 	0xffffffffU,	/* highest usable address */
163 	0xffffffffU,	/* maximum DMAable byte count */
164 	0x100,		/* alignment in bytes */
165 	0x100,		/* burst sizes (any?) */
166 	1,		/* minimum transfer */
167 	0xffffffffU,	/* maximum transfer */
168 	0xffffffffU,	/* maximum segment length */
169 	1,		/* maximum number of segments */
170 	1,		/* granularity */
171 	0,		/* flags (reserved) */
172 };
173 
174 /*
175  * DMA attributes for a tx buffer.
176  * The hardware supports up to 4 segments per descriptor, but all
177  * the wifi drivers currently put the whole frame into a single
178  * descriptor, so we define the maximum number of segments as 1,
179  * just the same as for the rx buffer.  We may leverage the HW's
180  * multi-segment ability in the future, which is why rx and tx
181  * buffer_dma_attr are not defined as the same structure.
182  */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 	DMA_ATTR_V0,	/* version of this structure */
185 	0,		/* lowest usable address */
186 	0xffffffffU,	/* highest usable address */
187 	0xffffffffU,	/* maximum DMAable byte count */
188 	4,		/* alignment in bytes */
189 	0x100,		/* burst sizes (any?) */
190 	1,		/* minimum transfer */
191 	0xffffffffU,	/* maximum transfer */
192 	0xffffffffU,	/* maximum segment length */
193 	1,		/* maximum number of segments */
194 	1,		/* granularity */
195 	0,		/* flags (reserved) */
196 };
197 
198 /* DMA attributes for the text and data segments of the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 	DMA_ATTR_V0,	/* version of this structure */
201 	0,		/* lowest usable address */
202 	0xffffffffU,	/* highest usable address */
203 	0x7fffffff,	/* maximum DMAable byte count */
204 	0x10,		/* alignment in bytes */
205 	0x100,		/* burst sizes (any?) */
206 	1,		/* minimum transfer */
207 	0xffffffffU,	/* maximum transfer */
208 	0xffffffffU,	/* maximum segment length */
209 	1,		/* maximum number of segments */
210 	1,		/* granularity */
211 	0,		/* flags (reserved) */
212 };
213 
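/*
 * Summary of the DMA attributes above: the variants differ mainly in
 * their alignment requirement (4KB for the shared and keep warm pages,
 * 256 bytes for ring descriptors and rx buffers, 4 bytes for commands
 * and tx buffers, 16 bytes for firmware segments), and every one
 * restricts an allocation to a single cookie (max segments == 1).
 */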
214 
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 	DDI_DEVICE_ATTR_V0,
218 	DDI_STRUCTURE_LE_ACC,
219 	DDI_STRICTORDER_ACC,
220 	DDI_DEFAULT_ACC
221 };
222 
223 /* DMA access attributes */
224 static ddi_device_acc_attr_t iwk_dma_accattr = {
225 	DDI_DEVICE_ATTR_V0,
226 	DDI_NEVERSWAP_ACC,
227 	DDI_STRICTORDER_ACC,
228 	DDI_DEFAULT_ACC
229 };
230 
231 static int	iwk_ring_init(iwk_sc_t *);
232 static void	iwk_ring_free(iwk_sc_t *);
233 static int	iwk_alloc_shared(iwk_sc_t *);
234 static void	iwk_free_shared(iwk_sc_t *);
235 static int	iwk_alloc_kw(iwk_sc_t *);
236 static void	iwk_free_kw(iwk_sc_t *);
237 static int	iwk_alloc_fw_dma(iwk_sc_t *);
238 static void	iwk_free_fw_dma(iwk_sc_t *);
239 static int	iwk_alloc_rx_ring(iwk_sc_t *);
240 static void	iwk_reset_rx_ring(iwk_sc_t *);
241 static void	iwk_free_rx_ring(iwk_sc_t *);
242 static int	iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
243     int, int);
244 static void	iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
245 static void	iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
246 
247 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
248 static void	iwk_node_free(ieee80211_node_t *);
249 static int	iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
250 static int	iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
251     const uint8_t mac[IEEE80211_ADDR_LEN]);
252 static void	iwk_mac_access_enter(iwk_sc_t *);
253 static void	iwk_mac_access_exit(iwk_sc_t *);
254 static uint32_t	iwk_reg_read(iwk_sc_t *, uint32_t);
255 static void	iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
256 static void	iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
257 		    uint32_t *, int);
258 static int	iwk_load_firmware(iwk_sc_t *);
259 static void	iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
260 		    iwk_rx_data_t *);
261 static void	iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
262 		    iwk_rx_data_t *);
263 static void	iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
264 static uint_t   iwk_intr(caddr_t, caddr_t);
265 static int	iwk_eep_load(iwk_sc_t *sc);
266 static void	iwk_get_mac_from_eep(iwk_sc_t *sc);
267 static int	iwk_eep_sem_down(iwk_sc_t *sc);
268 static void	iwk_eep_sem_up(iwk_sc_t *sc);
269 static uint_t   iwk_rx_softintr(caddr_t, caddr_t);
270 static uint8_t	iwk_rate_to_plcp(int);
271 static int	iwk_cmd(iwk_sc_t *, int, const void *, int, int);
272 static void	iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
273 static int	iwk_hw_set_before_auth(iwk_sc_t *);
274 static int	iwk_scan(iwk_sc_t *);
275 static int	iwk_config(iwk_sc_t *);
276 static void	iwk_stop_master(iwk_sc_t *);
277 static int	iwk_power_up(iwk_sc_t *);
278 static int	iwk_preinit(iwk_sc_t *);
279 static int	iwk_init(iwk_sc_t *);
280 static void	iwk_stop(iwk_sc_t *);
281 static void	iwk_amrr_init(iwk_amrr_t *);
282 static void	iwk_amrr_timeout(iwk_sc_t *);
283 static void	iwk_amrr_ratectl(void *, ieee80211_node_t *);
284 static int32_t	iwk_curr_tempera(iwk_sc_t *sc);
285 static int	iwk_tx_power_calibration(iwk_sc_t *sc);
286 static inline int	iwk_is_24G_band(iwk_sc_t *sc);
287 static inline int	iwk_is_fat_channel(iwk_sc_t *sc);
288 static int	iwk_txpower_grp(uint16_t channel);
289 static struct	iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
290     uint16_t channel,
291     int is_24G, int is_fat, int is_hi_chan);
292 static int32_t	iwk_band_number(iwk_sc_t *sc, uint16_t channel);
293 static int	iwk_division(int32_t num, int32_t denom, int32_t *res);
294 static int32_t	iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
295     int32_t x2, int32_t y2);
296 static int	iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
297     struct iwk_eep_calib_channel_info *chan_info);
298 static int32_t	iwk_voltage_compensation(int32_t eep_voltage,
299     int32_t curr_voltage);
300 static int32_t	iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
301 static int	iwk_txpower_table_cmd_init(iwk_sc_t *sc,
302     struct iwk_tx_power_db *tp_db);
303 static void	iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
304 static int	iwk_is_associated(iwk_sc_t *sc);
305 static int	iwk_rxgain_diff_init(iwk_sc_t *sc);
306 static int	iwk_rxgain_diff(iwk_sc_t *sc);
307 static int	iwk_rx_sens_init(iwk_sc_t *sc);
308 static int	iwk_rx_sens(iwk_sc_t *sc);
309 static int	iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
310 static int	iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
311 
312 static void	iwk_write_event_log(iwk_sc_t *);
313 static void	iwk_write_error_log(iwk_sc_t *);
314 
315 static int	iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
316 static int	iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
317 static int	iwk_quiesce(dev_info_t *dip);
318 
319 /*
320  * GLD specific operations
321  */
322 static int	iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
323 static int	iwk_m_start(void *arg);
324 static void	iwk_m_stop(void *arg);
325 static int	iwk_m_unicst(void *arg, const uint8_t *macaddr);
326 static int	iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
327 static int	iwk_m_promisc(void *arg, boolean_t on);
328 static mblk_t 	*iwk_m_tx(void *arg, mblk_t *mp);
329 static void	iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
330 static int	iwk_m_setprop(void *arg, const char *pr_name,
331 	mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
332 static int	iwk_m_getprop(void *arg, const char *pr_name,
333 	mac_prop_id_t wldp_pr_name, uint_t pr_flags, uint_t wldp_length,
334 	void *wldp_buf, uint_t *perm);
335 static void	iwk_destroy_locks(iwk_sc_t *sc);
336 static int	iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
337 static void	iwk_thread(iwk_sc_t *sc);
338 
339 /*
340  * Supported rates for 802.11b/g modes (in 500Kbps unit).
341  * 11a and 11n support will be added later.
342  */
343 static const struct ieee80211_rateset iwk_rateset_11b =
344 	{ 4, { 2, 4, 11, 22 } };
345 
346 static const struct ieee80211_rateset iwk_rateset_11g =
347 	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
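/*
 * For example, in the 500Kbps encoding above, 2 => 1Mbps, 22 => 11Mbps
 * and 108 => 54Mbps; the leading field is the number of rates in the
 * set.
 */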
348 
349 /*
350  * For mfthread only
351  */
352 extern pri_t minclsyspri;
353 
354 #define	DRV_NAME_4965	"iwk"
355 
356 /*
357  * Module Loading Data & Entry Points
358  */
359 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
360     iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
361 
362 static struct modldrv iwk_modldrv = {
363 	&mod_driverops,
364 	"Intel(R) 4965AGN driver(N)",
365 	&iwk_devops
366 };
367 
368 static struct modlinkage iwk_modlinkage = {
369 	MODREV_1,
370 	&iwk_modldrv,
371 	NULL
372 };
373 
374 int
375 _init(void)
376 {
377 	int	status;
378 
379 	status = ddi_soft_state_init(&iwk_soft_state_p,
380 	    sizeof (iwk_sc_t), 1);
381 	if (status != DDI_SUCCESS)
382 		return (status);
383 
384 	mac_init_ops(&iwk_devops, DRV_NAME_4965);
385 	status = mod_install(&iwk_modlinkage);
386 	if (status != DDI_SUCCESS) {
387 		mac_fini_ops(&iwk_devops);
388 		ddi_soft_state_fini(&iwk_soft_state_p);
389 	}
390 
391 	return (status);
392 }
393 
394 int
395 _fini(void)
396 {
397 	int status;
398 
399 	status = mod_remove(&iwk_modlinkage);
400 	if (status == DDI_SUCCESS) {
401 		mac_fini_ops(&iwk_devops);
402 		ddi_soft_state_fini(&iwk_soft_state_p);
403 	}
404 
405 	return (status);
406 }
407 
408 int
409 _info(struct modinfo *mip)
410 {
411 	return (mod_info(&iwk_modlinkage, mip));
412 }
413 
414 /*
415  * Mac Call Back entries
416  */
417 mac_callbacks_t	iwk_m_callbacks = {
418 	MC_IOCTL | MC_SETPROP | MC_GETPROP,
419 	iwk_m_stat,
420 	iwk_m_start,
421 	iwk_m_stop,
422 	iwk_m_promisc,
423 	iwk_m_multicst,
424 	iwk_m_unicst,
425 	iwk_m_tx,
426 	NULL,
427 	iwk_m_ioctl,
428 	NULL,
429 	NULL,
430 	NULL,
431 	iwk_m_setprop,
432 	iwk_m_getprop
433 };
434 
435 #ifdef DEBUG
436 void
437 iwk_dbg(uint32_t flags, const char *fmt, ...)
438 {
439 	va_list	ap;
440 
441 	if (flags & iwk_dbg_flags) {
442 		va_start(ap, fmt);
443 		vcmn_err(CE_NOTE, fmt, ap);
444 		va_end(ap);
445 	}
446 }
447 #endif
448 
449 /*
450  * device operations
451  */
452 int
453 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
454 {
455 	iwk_sc_t		*sc;
456 	ieee80211com_t	*ic;
457 	int			instance, err, i;
458 	char			strbuf[32];
459 	wifi_data_t		wd = { 0 };
460 	mac_register_t		*macp;
461 
462 	int			intr_type;
463 	int			intr_count;
464 	int			intr_actual;
465 
466 	switch (cmd) {
467 	case DDI_ATTACH:
468 		break;
469 	case DDI_RESUME:
470 		sc = ddi_get_soft_state(iwk_soft_state_p,
471 		    ddi_get_instance(dip));
472 		ASSERT(sc != NULL);
473 		mutex_enter(&sc->sc_glock);
474 		sc->sc_flags &= ~IWK_F_SUSPEND;
475 		mutex_exit(&sc->sc_glock);
476 		if (sc->sc_flags & IWK_F_RUNNING) {
477 			(void) iwk_init(sc);
478 			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
479 		}
480 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
481 		return (DDI_SUCCESS);
482 	default:
483 		err = DDI_FAILURE;
484 		goto attach_fail1;
485 	}
486 
487 	instance = ddi_get_instance(dip);
488 	err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
489 	if (err != DDI_SUCCESS) {
490 		cmn_err(CE_WARN,
491 		    "iwk_attach(): failed to allocate soft state\n");
492 		goto attach_fail1;
493 	}
494 	sc = ddi_get_soft_state(iwk_soft_state_p, instance);
495 	sc->sc_dip = dip;
496 
497 	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
498 	    &iwk_reg_accattr, &sc->sc_cfg_handle);
499 	if (err != DDI_SUCCESS) {
500 		cmn_err(CE_WARN,
501 		    "iwk_attach(): failed to map config space regs\n");
502 		goto attach_fail2;
503 	}
504 	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
505 	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
506 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
507 	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
508 	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
509 	if (!sc->sc_clsz)
510 		sc->sc_clsz = 16;
511 	sc->sc_clsz = (sc->sc_clsz << 2);
512 	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
513 	    IEEE80211_MTU + IEEE80211_CRC_LEN +
514 	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
515 	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);
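	/*
	 * i.e. sc_dmabuf_sz is big enough for 0x1000 bytes of headroom
	 * plus a maximum-sized 802.11 frame (header + MTU + CRC + WEP
	 * overhead), rounded up to a multiple of the cache line size
	 * computed above.
	 */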
516 	/*
517 	 * Map operating registers
518 	 */
519 	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
520 	    0, 0, &iwk_reg_accattr, &sc->sc_handle);
521 	if (err != DDI_SUCCESS) {
522 		cmn_err(CE_WARN,
523 		    "iwk_attach(): failed to map device regs\n");
524 		goto attach_fail2a;
525 	}
526 
527 	err = ddi_intr_get_supported_types(dip, &intr_type);
528 	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
529 		cmn_err(CE_WARN, "iwk_attach(): "
530 		    "Fixed type interrupt is not supported\n");
531 		goto attach_fail_intr_a;
532 	}
533 
534 	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
535 	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
536 		cmn_err(CE_WARN, "iwk_attach(): "
537 		    "No fixed interrupts\n");
538 		goto attach_fail_intr_a;
539 	}
540 
541 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
542 
543 	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
544 	    intr_count, &intr_actual, 0);
545 	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
546 		cmn_err(CE_WARN, "iwk_attach(): "
547 		    "ddi_intr_alloc() failed 0x%x\n", err);
548 		goto attach_fail_intr_b;
549 	}
550 
551 	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
552 	if (err != DDI_SUCCESS) {
553 		cmn_err(CE_WARN, "iwk_attach(): "
554 		    "ddi_intr_get_pri() failed 0x%x\n", err);
555 		goto attach_fail_intr_c;
556 	}
557 
558 	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
559 	    DDI_INTR_PRI(sc->sc_intr_pri));
560 	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
561 	    DDI_INTR_PRI(sc->sc_intr_pri));
562 	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
563 	    DDI_INTR_PRI(sc->sc_intr_pri));
564 
565 	cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
566 	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
567 	cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
568 	/*
569 	 * initialize the mf thread
570 	 */
571 	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
572 	sc->sc_mf_thread = NULL;
573 	sc->sc_mf_thread_switch = 0;
574 
575 	/*
576 	 * Allocate shared page.
577 	 */
578 	err = iwk_alloc_shared(sc);
579 	if (err != DDI_SUCCESS) {
580 		cmn_err(CE_WARN, "iwk_attach(): "
581 		    "failed to allocate shared page\n");
582 		goto attach_fail3;
583 	}
584 
585 	/*
586 	 * Allocate keep warm page.
587 	 */
588 	err = iwk_alloc_kw(sc);
589 	if (err != DDI_SUCCESS) {
590 		cmn_err(CE_WARN, "iwk_attach(): "
591 		    "failed to allocate keep warm page\n");
592 		goto attach_fail3a;
593 	}
594 
595 	/*
596 	 * Do some necessary hardware initializations.
597 	 */
598 	err = iwk_preinit(sc);
599 	if (err != DDI_SUCCESS) {
600 		cmn_err(CE_WARN, "iwk_attach(): "
601 		    "failed to init hardware\n");
602 		goto attach_fail4;
603 	}
604 
605 	/* initialize EEPROM */
606 	err = iwk_eep_load(sc);  /* get hardware configurations from eeprom */
607 	if (err != 0) {
608 		cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
609 		goto attach_fail4;
610 	}
611 
612 	if (sc->sc_eep_map.calib_version < EEP_TX_POWER_VERSION_NEW) {
613 		cmn_err(CE_WARN, "older EEPROM detected\n");
614 		goto attach_fail4;
615 	}
616 
617 	iwk_get_mac_from_eep(sc);
618 
619 	err = iwk_ring_init(sc);
620 	if (err != DDI_SUCCESS) {
621 		cmn_err(CE_WARN, "iwk_attach(): "
622 		    "failed to allocate and initialize ring\n");
623 		goto attach_fail4;
624 	}
625 
626 	sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
627 
628 	err = iwk_alloc_fw_dma(sc);
629 	if (err != DDI_SUCCESS) {
630 		cmn_err(CE_WARN, "iwk_attach(): "
631 		    "failed to allocate firmware dma\n");
632 		goto attach_fail5;
633 	}
634 
635 	/*
636 	 * Initialize the wifi part, which will be used by
637 	 * generic layer
638 	 */
639 	ic = &sc->sc_ic;
640 	ic->ic_phytype  = IEEE80211_T_OFDM;
641 	ic->ic_opmode   = IEEE80211_M_STA; /* default to BSS mode */
642 	ic->ic_state    = IEEE80211_S_INIT;
643 	ic->ic_maxrssi  = 100; /* experimental number */
644 	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
645 	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
646 	/*
647 	 * use software WEP and TKIP, hardware CCMP;
648 	 */
649 	ic->ic_caps |= IEEE80211_C_AES_CCM;
650 	/*
651 	 * Support WPA/WPA2
652 	 */
653 	ic->ic_caps |= IEEE80211_C_WPA;
654 
655 	/* set supported .11b and .11g rates */
656 	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
657 	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
658 
659 	/* set supported .11b and .11g channels (1 through 11) */
660 	for (i = 1; i <= 11; i++) {
661 		ic->ic_sup_channels[i].ich_freq =
662 		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
663 		ic->ic_sup_channels[i].ich_flags =
664 		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
665 		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
666 		    IEEE80211_CHAN_PASSIVE;
667 	}
668 
669 	ic->ic_xmit = iwk_send;
670 	/*
671 	 * init Wifi layer
672 	 */
673 	ieee80211_attach(ic);
674 
675 	/*
676 	 * each instance has its own WPA door
677 	 */
678 	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
679 	    ddi_driver_name(dip),
680 	    ddi_get_instance(dip));
681 
682 	/*
683 	 * Override 80211 default routines
684 	 */
685 	sc->sc_newstate = ic->ic_newstate;
686 	ic->ic_newstate = iwk_newstate;
687 	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
688 	ic->ic_node_alloc = iwk_node_alloc;
689 	ic->ic_node_free = iwk_node_free;
690 	ic->ic_crypto.cs_key_set = iwk_key_set;
691 	ieee80211_media_init(ic);
692 	/*
693 	 * initialize default tx key
694 	 */
695 	ic->ic_def_txkey = 0;
696 	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
697 	    iwk_rx_softintr, (caddr_t)sc);
698 	if (err != DDI_SUCCESS) {
699 		cmn_err(CE_WARN, "iwk_attach(): "
700 		    "add soft interrupt failed\n");
701 		goto attach_fail7;
702 	}
703 
704 	/*
705 	 * Add the interrupt handler
706 	 */
707 	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
708 	    (caddr_t)sc, NULL);
709 	if (err != DDI_SUCCESS) {
710 		cmn_err(CE_WARN, "iwk_attach(): "
711 		    "ddi_intr_add_handler() failed\n");
712 		goto attach_fail8;
713 	}
714 
715 	err = ddi_intr_enable(sc->sc_intr_htable[0]);
716 	if (err != DDI_SUCCESS) {
717 		cmn_err(CE_WARN, "iwk_attach(): "
718 		    "ddi_intr_enable() failed\n");
719 		goto attach_fail_intr_d;
720 	}
721 
722 	/*
723 	 * Initialize pointer to device specific functions
724 	 */
725 	wd.wd_secalloc = WIFI_SEC_NONE;
726 	wd.wd_opmode = ic->ic_opmode;
727 	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
728 
729 	macp = mac_alloc(MAC_VERSION);
730 	if (macp == NULL) {
731 		cmn_err(CE_WARN, "iwk_attach(): failed to do mac_alloc()\n");
732 		err = DDI_FAILURE;
733 		goto attach_fail9;
734 	}
735 
736 	macp->m_type_ident	= MAC_PLUGIN_IDENT_WIFI;
737 	macp->m_driver		= sc;
738 	macp->m_dip		= dip;
739 	macp->m_src_addr	= ic->ic_macaddr;
740 	macp->m_callbacks	= &iwk_m_callbacks;
741 	macp->m_min_sdu		= 0;
742 	macp->m_max_sdu		= IEEE80211_MTU;
743 	macp->m_pdata		= &wd;
744 	macp->m_pdata_size	= sizeof (wd);
745 
746 	/*
747 	 * Register the macp to mac
748 	 */
749 	err = mac_register(macp, &ic->ic_mach);
750 	mac_free(macp);
751 	if (err != DDI_SUCCESS) {
752 		cmn_err(CE_WARN,
753 		    "iwk_attach(): failed to do mac_register()\n");
754 		goto attach_fail9;
755 	}
756 
757 	/*
758 	 * Create minor node of type DDI_NT_NET_WIFI
759 	 */
760 	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
761 	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
762 	    instance + 1, DDI_NT_NET_WIFI, 0);
763 	if (err != DDI_SUCCESS)
764 		cmn_err(CE_WARN,
765 		    "iwk_attach(): failed to do ddi_create_minor_node()\n");
766 
767 	/*
768 	 * Notify link is down now
769 	 */
770 	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
771 
772 	/*
773 	 * create the mf thread to handle link status updates,
774 	 * recovery from fatal errors, etc.
775 	 */
776 	sc->sc_mf_thread_switch = 1;
777 	if (sc->sc_mf_thread == NULL)
778 		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
779 		    iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
780 
781 	sc->sc_flags |= IWK_F_ATTACHED;
782 
783 	return (DDI_SUCCESS);
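
/*
 * Failure unwinding: the labels below release the resources acquired
 * above in reverse order of their setup.
 */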
784 attach_fail9:
785 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
786 attach_fail_intr_d:
787 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
788 
789 attach_fail8:
790 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
791 	sc->sc_soft_hdl = NULL;
792 attach_fail7:
793 	ieee80211_detach(ic);
794 attach_fail6:
795 	iwk_free_fw_dma(sc);
796 attach_fail5:
797 	iwk_ring_free(sc);
798 attach_fail4:
799 	iwk_free_kw(sc);
800 attach_fail3a:
801 	iwk_free_shared(sc);
802 attach_fail3:
803 	iwk_destroy_locks(sc);
804 attach_fail_intr_c:
805 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
806 attach_fail_intr_b:
807 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
808 attach_fail_intr_a:
809 	ddi_regs_map_free(&sc->sc_handle);
810 attach_fail2a:
811 	ddi_regs_map_free(&sc->sc_cfg_handle);
812 attach_fail2:
813 	ddi_soft_state_free(iwk_soft_state_p, instance);
814 attach_fail1:
815 	return (err);
816 }
817 
818 int
819 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
820 {
821 	iwk_sc_t	*sc;
822 	int err;
823 
824 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
825 	ASSERT(sc != NULL);
826 
827 	switch (cmd) {
828 	case DDI_DETACH:
829 		break;
830 	case DDI_SUSPEND:
831 		if (sc->sc_flags & IWK_F_RUNNING) {
832 			iwk_stop(sc);
833 		}
834 		mutex_enter(&sc->sc_glock);
835 		sc->sc_flags |= IWK_F_SUSPEND;
836 		mutex_exit(&sc->sc_glock);
837 		IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
838 		return (DDI_SUCCESS);
839 	default:
840 		return (DDI_FAILURE);
841 	}
842 
843 	if (!(sc->sc_flags & IWK_F_ATTACHED))
844 		return (DDI_FAILURE);
845 
846 	err = mac_disable(sc->sc_ic.ic_mach);
847 	if (err != DDI_SUCCESS)
848 		return (err);
849 
850 	/*
851 	 * Destroy the mf_thread
852 	 */
853 	mutex_enter(&sc->sc_mt_lock);
854 	sc->sc_mf_thread_switch = 0;
855 	while (sc->sc_mf_thread != NULL) {
856 		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
857 			break;
858 	}
859 	mutex_exit(&sc->sc_mt_lock);
860 
861 	iwk_stop(sc);
862 	DELAY(500000);
863 
864 	/*
865 	 * Unregister from the MAC layer subsystem
866 	 */
867 	(void) mac_unregister(sc->sc_ic.ic_mach);
868 
869 	mutex_enter(&sc->sc_glock);
870 	iwk_free_fw_dma(sc);
871 	iwk_ring_free(sc);
872 	iwk_free_kw(sc);
873 	iwk_free_shared(sc);
874 	mutex_exit(&sc->sc_glock);
875 
876 	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
877 	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
878 	(void) ddi_intr_free(sc->sc_intr_htable[0]);
879 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
880 
881 	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
882 	sc->sc_soft_hdl = NULL;
883 
884 	/*
885 	 * detach ieee80211
886 	 */
887 	ieee80211_detach(&sc->sc_ic);
888 
889 	iwk_destroy_locks(sc);
890 
891 	ddi_regs_map_free(&sc->sc_handle);
892 	ddi_regs_map_free(&sc->sc_cfg_handle);
893 	ddi_remove_minor_node(dip, NULL);
894 	ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
895 
896 	return (DDI_SUCCESS);
897 }
898 
899 /*
900  * quiesce(9E) entry point.
901  *
902  * This function is called when the system is single-threaded at high
903  * PIL with preemption disabled. Therefore, this function must not
904  * block.
905  *
906  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
907  * DDI_FAILURE indicates an error condition and should almost never happen.
908  */
909 int
910 iwk_quiesce(dev_info_t *dip)
911 {
912 	iwk_sc_t	*sc;
913 
914 	sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
915 	ASSERT(sc != NULL);
916 
917 	/* no message printing and no lock acquisition */
918 #ifdef DEBUG
919 	iwk_dbg_flags = 0;
920 #endif
921 	sc->sc_flags |= IWK_F_QUIESCED;
922 
923 	iwk_stop(sc);
924 
925 	return (DDI_SUCCESS);
926 }
927 
928 static void
929 iwk_destroy_locks(iwk_sc_t *sc)
930 {
931 	cv_destroy(&sc->sc_mt_cv);
932 	mutex_destroy(&sc->sc_mt_lock);
933 	cv_destroy(&sc->sc_tx_cv);
934 	cv_destroy(&sc->sc_cmd_cv);
935 	cv_destroy(&sc->sc_fw_cv);
936 	mutex_destroy(&sc->sc_tx_lock);
937 	mutex_destroy(&sc->sc_glock);
938 }
939 
940 /*
941  * Allocate an area of memory and a DMA handle for accessing it
942  */
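/*
 * The allocation is the standard three-step DDI sequence
 * (ddi_dma_alloc_handle, ddi_dma_mem_alloc, ddi_dma_addr_bind_handle);
 * the error paths below unwind whatever has been done so far in
 * reverse order.
 */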
943 static int
944 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
945     ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
946     uint_t dma_flags, iwk_dma_t *dma_p)
947 {
948 	caddr_t vaddr;
949 	int err;
950 
951 	/*
952 	 * Allocate handle
953 	 */
954 	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
955 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
956 	if (err != DDI_SUCCESS) {
957 		dma_p->dma_hdl = NULL;
958 		return (DDI_FAILURE);
959 	}
960 
961 	/*
962 	 * Allocate memory
963 	 */
964 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
965 	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
966 	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
967 	if (err != DDI_SUCCESS) {
968 		ddi_dma_free_handle(&dma_p->dma_hdl);
969 		dma_p->dma_hdl = NULL;
970 		dma_p->acc_hdl = NULL;
971 		return (DDI_FAILURE);
972 	}
973 
974 	/*
975 	 * Bind the two together
976 	 */
977 	dma_p->mem_va = vaddr;
978 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
979 	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
980 	    &dma_p->cookie, &dma_p->ncookies);
981 	if (err != DDI_DMA_MAPPED) {
982 		ddi_dma_mem_free(&dma_p->acc_hdl);
983 		ddi_dma_free_handle(&dma_p->dma_hdl);
984 		dma_p->acc_hdl = NULL;
985 		dma_p->dma_hdl = NULL;
986 		return (DDI_FAILURE);
987 	}
988 
989 	dma_p->nslots = ~0U;
990 	dma_p->size = ~0U;
991 	dma_p->token = ~0U;
992 	dma_p->offset = 0;
993 	return (DDI_SUCCESS);
994 }
995 
996 /*
997  * Free one allocated area of DMAable memory
998  */
999 static void
1000 iwk_free_dma_mem(iwk_dma_t *dma_p)
1001 {
1002 	if (dma_p->dma_hdl != NULL) {
1003 		if (dma_p->ncookies) {
1004 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1005 			dma_p->ncookies = 0;
1006 		}
1007 		ddi_dma_free_handle(&dma_p->dma_hdl);
1008 		dma_p->dma_hdl = NULL;
1009 	}
1010 
1011 	if (dma_p->acc_hdl != NULL) {
1012 		ddi_dma_mem_free(&dma_p->acc_hdl);
1013 		dma_p->acc_hdl = NULL;
1014 	}
1015 }
1016 
1017 /*
1018  * Allocate DMA memory for the firmware segments and copy them in.
1019  */
1020 static int
1021 iwk_alloc_fw_dma(iwk_sc_t *sc)
1022 {
1023 	int err = DDI_SUCCESS;
1024 	iwk_dma_t *dma_p;
1025 	char *t;
1026 
1027 	/*
1028 	 * firmware image layout:
1029 	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1030 	 */
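	/*
	 * Each segment's size comes from the firmware header, so the
	 * start of one segment is simply the end of the previous one;
	 * the pointer 't' below walks the image in that order.
	 */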
1031 	t = (char *)(sc->sc_hdr + 1);
1032 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1033 	    &fw_dma_attr, &iwk_dma_accattr,
1034 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1035 	    &sc->sc_dma_fw_text);
1036 	dma_p = &sc->sc_dma_fw_text;
1037 	IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1038 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1039 	    dma_p->cookie.dmac_size));
1040 	if (err != DDI_SUCCESS) {
1041 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1042 		    " text dma memory");
1043 		goto fail;
1044 	}
1045 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1046 
1047 	t += LE_32(sc->sc_hdr->textsz);
1048 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1049 	    &fw_dma_attr, &iwk_dma_accattr,
1050 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1051 	    &sc->sc_dma_fw_data);
1052 	dma_p = &sc->sc_dma_fw_data;
1053 	IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1054 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1055 	    dma_p->cookie.dmac_size));
1056 	if (err != DDI_SUCCESS) {
1057 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1058 		    " data dma memory");
1059 		goto fail;
1060 	}
1061 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1062 
1063 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1064 	    &fw_dma_attr, &iwk_dma_accattr,
1065 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1066 	    &sc->sc_dma_fw_data_bak);
1067 	dma_p = &sc->sc_dma_fw_data_bak;
1068 	IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1069 	    "size:%lx]\n",
1070 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1071 	    dma_p->cookie.dmac_size));
1072 	if (err != DDI_SUCCESS) {
1073 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1074 		    " data backup dma memory");
1075 		goto fail;
1076 	}
1077 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1078 
1079 	t += LE_32(sc->sc_hdr->datasz);
1080 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1081 	    &fw_dma_attr, &iwk_dma_accattr,
1082 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1083 	    &sc->sc_dma_fw_init_text);
1084 	dma_p = &sc->sc_dma_fw_init_text;
1085 	IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1086 	    "size:%lx]\n",
1087 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1088 	    dma_p->cookie.dmac_size));
1089 	if (err != DDI_SUCCESS) {
1090 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1091 		    " init text dma memory");
1092 		goto fail;
1093 	}
1094 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1095 
1096 	t += LE_32(sc->sc_hdr->init_textsz);
1097 	err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1098 	    &fw_dma_attr, &iwk_dma_accattr,
1099 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1100 	    &sc->sc_dma_fw_init_data);
1101 	dma_p = &sc->sc_dma_fw_init_data;
1102 	IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1103 	    "size:%lx]\n",
1104 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1105 	    dma_p->cookie.dmac_size));
1106 	if (err != DDI_SUCCESS) {
1107 		cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1108 		    " init data dma memory");
1109 		goto fail;
1110 	}
1111 	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1112 
1113 	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1114 fail:
1115 	return (err);
1116 }
1117 
1118 static void
1119 iwk_free_fw_dma(iwk_sc_t *sc)
1120 {
1121 	iwk_free_dma_mem(&sc->sc_dma_fw_text);
1122 	iwk_free_dma_mem(&sc->sc_dma_fw_data);
1123 	iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1124 	iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1125 	iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1126 }
1127 
1128 /*
1129  * Allocate a shared page between host and NIC.
1130  */
1131 static int
1132 iwk_alloc_shared(iwk_sc_t *sc)
1133 {
1134 	iwk_dma_t *dma_p;
1135 	int err = DDI_SUCCESS;
1136 
1137 	/* must be aligned on a 4K-page boundary */
1138 	err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1139 	    &sh_dma_attr, &iwk_dma_accattr,
1140 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1141 	    &sc->sc_dma_sh);
1142 	if (err != DDI_SUCCESS)
1143 		goto fail;
1144 	sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1145 
1146 	dma_p = &sc->sc_dma_sh;
1147 	IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1148 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1149 	    dma_p->cookie.dmac_size));
1150 
1151 	return (err);
1152 fail:
1153 	iwk_free_shared(sc);
1154 	return (err);
1155 }
1156 
1157 static void
1158 iwk_free_shared(iwk_sc_t *sc)
1159 {
1160 	iwk_free_dma_mem(&sc->sc_dma_sh);
1161 }
1162 
1163 /*
1164  * Allocate a keep warm page.
1165  */
1166 static int
1167 iwk_alloc_kw(iwk_sc_t *sc)
1168 {
1169 	iwk_dma_t *dma_p;
1170 	int err = DDI_SUCCESS;
1171 
1172 	/* must be aligned on a 4K-page boundary */
1173 	err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1174 	    &kw_dma_attr, &iwk_dma_accattr,
1175 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1176 	    &sc->sc_dma_kw);
1177 	if (err != DDI_SUCCESS)
1178 		goto fail;
1179 
1180 	dma_p = &sc->sc_dma_kw;
1181 	IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1182 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1183 	    dma_p->cookie.dmac_size));
1184 
1185 	return (err);
1186 fail:
1187 	iwk_free_kw(sc);
1188 	return (err);
1189 }
1190 
1191 static void
1192 iwk_free_kw(iwk_sc_t *sc)
1193 {
1194 	iwk_free_dma_mem(&sc->sc_dma_kw);
1195 }
1196 
1197 static int
1198 iwk_alloc_rx_ring(iwk_sc_t *sc)
1199 {
1200 	iwk_rx_ring_t *ring;
1201 	iwk_rx_data_t *data;
1202 	iwk_dma_t *dma_p;
1203 	int i, err = DDI_SUCCESS;
1204 
1205 	ring = &sc->sc_rxq;
1206 	ring->cur = 0;
1207 
1208 	err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1209 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1210 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1211 	    &ring->dma_desc);
1212 	if (err != DDI_SUCCESS) {
1213 		cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1214 		goto fail;
1215 	}
1216 	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1217 	dma_p = &ring->dma_desc;
1218 	IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1219 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1220 	    dma_p->cookie.dmac_size));
1221 
1222 	/*
1223 	 * Allocate Rx buffers.
1224 	 */
1225 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1226 		data = &ring->data[i];
1227 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1228 		    &rx_buffer_dma_attr, &iwk_dma_accattr,
1229 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1230 		    &data->dma_data);
1231 		if (err != DDI_SUCCESS) {
1232 			cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1233 			    "failed\n", i);
1234 			goto fail;
1235 		}
1236 		/*
1237 		 * physical address bits [8-36] are used here,
1238 		 * instead of bits [0-31] as on the 3945.
1239 		 */
1240 		ring->desc[i] = LE_32((uint32_t)
1241 		    (data->dma_data.cookie.dmac_address >> 8));
1242 	}
1243 	dma_p = &ring->data[0].dma_data;
1244 	IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1245 	    "size:%lx]\n",
1246 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1247 	    dma_p->cookie.dmac_size));
1248 
1249 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1250 
1251 	return (err);
1252 
1253 fail:
1254 	iwk_free_rx_ring(sc);
1255 	return (err);
1256 }
1257 
1258 static void
1259 iwk_reset_rx_ring(iwk_sc_t *sc)
1260 {
1261 	int n;
1262 
1263 	iwk_mac_access_enter(sc);
1264 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1265 	for (n = 0; n < 2000; n++) {
1266 		if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1267 			break;
1268 		DELAY(1000);
1269 	}
1270 
1271 	if (n == 2000)
1272 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1273 
1274 	iwk_mac_access_exit(sc);
1275 
1276 	sc->sc_rxq.cur = 0;
1277 }
1278 
1279 static void
1280 iwk_free_rx_ring(iwk_sc_t *sc)
1281 {
1282 	int i;
1283 
1284 	for (i = 0; i < RX_QUEUE_SIZE; i++) {
1285 		if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1286 			IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1287 			    DDI_DMA_SYNC_FORCPU);
1288 		iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1289 	}
1290 
1291 	if (sc->sc_rxq.dma_desc.dma_hdl)
1292 		IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1293 	iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1294 }
1295 
1296 static int
1297 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1298     int slots, int qid)
1299 {
1300 	iwk_tx_data_t *data;
1301 	iwk_tx_desc_t *desc_h;
1302 	uint32_t paddr_desc_h;
1303 	iwk_cmd_t *cmd_h;
1304 	uint32_t paddr_cmd_h;
1305 	iwk_dma_t *dma_p;
1306 	int i, err = DDI_SUCCESS;
1307 
1308 	ring->qid = qid;
1309 	ring->count = TFD_QUEUE_SIZE_MAX;
1310 	ring->window = slots;
1311 	ring->queued = 0;
1312 	ring->cur = 0;
1313 
1314 	err = iwk_alloc_dma_mem(sc,
1315 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1316 	    &ring_desc_dma_attr, &iwk_dma_accattr,
1317 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1318 	    &ring->dma_desc);
1319 	if (err != DDI_SUCCESS) {
1320 		cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1321 		    "failed\n", qid);
1322 		goto fail;
1323 	}
1324 	dma_p = &ring->dma_desc;
1325 	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1326 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1327 	    dma_p->cookie.dmac_size));
1328 
1329 	desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1330 	paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1331 
1332 	err = iwk_alloc_dma_mem(sc,
1333 	    TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1334 	    &cmd_dma_attr, &iwk_dma_accattr,
1335 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1336 	    &ring->dma_cmd);
1337 	if (err != DDI_SUCCESS) {
1338 		cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1339 		    "failed\n", qid);
1340 		goto fail;
1341 	}
1342 	dma_p = &ring->dma_cmd;
1343 	IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1344 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1345 	    dma_p->cookie.dmac_size));
1346 
1347 	cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1348 	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1349 
1350 	/*
1351 	 * Allocate Tx buffers.
1352 	 */
1353 	ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1354 	    KM_NOSLEEP);
1355 	if (ring->data == NULL) {
1356 		cmn_err(CE_WARN, "could not allocate tx data slots\n");
1357 		goto fail;
1358 	}
1359 
1360 	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1361 		data = &ring->data[i];
1362 		err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1363 		    &tx_buffer_dma_attr, &iwk_dma_accattr,
1364 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1365 		    &data->dma_data);
1366 		if (err != DDI_SUCCESS) {
1367 			cmn_err(CE_WARN, "dma alloc tx ring "
1368 			    "buf[%d] failed\n", i);
1369 			goto fail;
1370 		}
1371 
1372 		data->desc = desc_h + i;
1373 		data->paddr_desc = paddr_desc_h +
1374 		    _PTRDIFF(data->desc, desc_h);
1375 		data->cmd = cmd_h + i; /* (i % slots); */
1376 		/* ((i % slots) * sizeof (iwk_cmd_t)); */
1377 		data->paddr_cmd = paddr_cmd_h +
1378 		    _PTRDIFF(data->cmd, cmd_h);
1379 	}
1380 	dma_p = &ring->data[0].dma_data;
1381 	IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1382 	    "size:%lx]\n",
1383 	    dma_p->ncookies, dma_p->cookie.dmac_address,
1384 	    dma_p->cookie.dmac_size));
1385 
1386 	return (err);
1387 
1388 fail:
1389 	if (ring->data)
1390 		kmem_free(ring->data,
1391 		    sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1392 	iwk_free_tx_ring(sc, ring);
1393 	return (err);
1394 }
1395 
1396 static void
1397 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1398 {
1399 	iwk_tx_data_t *data;
1400 	int i, n;
1401 
1402 	iwk_mac_access_enter(sc);
1403 
1404 	IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1405 	for (n = 0; n < 200; n++) {
1406 		if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1407 		    IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1408 			break;
1409 		DELAY(10);
1410 	}
1411 	if (n == 200) {
1412 		IWK_DBG((IWK_DEBUG_DMA, "timeout resetting tx ring %d\n",
1413 		    ring->qid));
1414 	}
1415 	iwk_mac_access_exit(sc);
1416 
1417 	for (i = 0; i < ring->count; i++) {
1418 		data = &ring->data[i];
1419 		IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1420 	}
1421 
1422 	ring->queued = 0;
1423 	ring->cur = 0;
1424 }
1425 
1426 /*ARGSUSED*/
1427 static void
1428 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1429 {
1430 	int i;
1431 
1432 	if (ring->dma_desc.dma_hdl != NULL)
1433 		IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1434 	iwk_free_dma_mem(&ring->dma_desc);
1435 
1436 	if (ring->dma_cmd.dma_hdl != NULL)
1437 		IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1438 	iwk_free_dma_mem(&ring->dma_cmd);
1439 
1440 	if (ring->data != NULL) {
1441 		for (i = 0; i < ring->count; i++) {
1442 			if (ring->data[i].dma_data.dma_hdl)
1443 				IWK_DMA_SYNC(ring->data[i].dma_data,
1444 				    DDI_DMA_SYNC_FORDEV);
1445 			iwk_free_dma_mem(&ring->data[i].dma_data);
1446 		}
1447 		kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1448 	}
1449 }
1450 
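/*
 * Allocate every tx ring (the command queue gets TFD_CMD_SLOTS slots,
 * the data queues TFD_TX_CMD_SLOTS) and the single rx ring.
 */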
1451 static int
1452 iwk_ring_init(iwk_sc_t *sc)
1453 {
1454 	int i, err = DDI_SUCCESS;
1455 
1456 	for (i = 0; i < IWK_NUM_QUEUES; i++) {
1457 		if (i == IWK_CMD_QUEUE_NUM)
1458 			continue;
1459 		err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1460 		    i);
1461 		if (err != DDI_SUCCESS)
1462 			goto fail;
1463 	}
1464 	err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1465 	    TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1466 	if (err != DDI_SUCCESS)
1467 		goto fail;
1468 	err = iwk_alloc_rx_ring(sc);
1469 	if (err != DDI_SUCCESS)
1470 		goto fail;
1471 	return (err);
1472 
1473 fail:
1474 	return (err);
1475 }
1476 
1477 static void
1478 iwk_ring_free(iwk_sc_t *sc)
1479 {
1480 	int i = IWK_NUM_QUEUES;
1481 
1482 	iwk_free_rx_ring(sc);
1483 	while (--i >= 0) {
1484 		iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1485 	}
1486 }
1487 
1488 /* ARGSUSED */
1489 static ieee80211_node_t *
1490 iwk_node_alloc(ieee80211com_t *ic)
1491 {
1492 	iwk_amrr_t *amrr;
1493 
1494 	amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1495 	if (amrr != NULL)
1496 		iwk_amrr_init(amrr);
1497 	return (&amrr->in);
1498 }
1499 
1500 static void
1501 iwk_node_free(ieee80211_node_t *in)
1502 {
1503 	ieee80211com_t *ic = in->in_ic;
1504 
1505 	ic->ic_node_cleanup(in);
1506 	if (in->in_wpa_ie != NULL)
1507 		ieee80211_free(in->in_wpa_ie);
1508 	kmem_free(in, sizeof (iwk_amrr_t));
1509 }
1510 
1511 /*ARGSUSED*/
1512 static int
1513 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1514 {
1515 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1516 	ieee80211_node_t *in = ic->ic_bss;
1517 	enum ieee80211_state ostate = ic->ic_state;
1518 	int i, err = IWK_SUCCESS;
1519 
1520 	mutex_enter(&sc->sc_glock);
1521 	switch (nstate) {
1522 	case IEEE80211_S_SCAN:
1523 		switch (ostate) {
1524 		case IEEE80211_S_INIT:
1525 		{
1526 			iwk_add_sta_t node;
1527 
1528 			sc->sc_flags |= IWK_F_SCANNING;
1529 			iwk_set_led(sc, 2, 10, 2);
1530 
1531 			/*
1532 			 * clear association to receive beacons from
1533 			 * all BSSes
1534 			 */
1535 			sc->sc_config.assoc_id = 0;
1536 			sc->sc_config.filter_flags &=
1537 			    ~LE_32(RXON_FILTER_ASSOC_MSK);
1538 
1539 			IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1540 			    "flags %x filter_flags %x\n", sc->sc_config.chan,
1541 			    sc->sc_config.flags, sc->sc_config.filter_flags));
1542 
1543 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1544 			    sizeof (iwk_rxon_cmd_t), 1);
1545 			if (err != IWK_SUCCESS) {
1546 				cmn_err(CE_WARN,
1547 				    "could not clear association\n");
1548 				sc->sc_flags &= ~IWK_F_SCANNING;
1549 				mutex_exit(&sc->sc_glock);
1550 				return (err);
1551 			}
1552 
1553 			/* add broadcast node to send probe request */
1554 			(void) memset(&node, 0, sizeof (node));
1555 			(void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1556 			node.id = IWK_BROADCAST_ID;
1557 			err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1558 			    sizeof (node), 1);
1559 			if (err != IWK_SUCCESS) {
1560 				cmn_err(CE_WARN, "could not add "
1561 				    "broadcast node\n");
1562 				sc->sc_flags &= ~IWK_F_SCANNING;
1563 				mutex_exit(&sc->sc_glock);
1564 				return (err);
1565 			}
1566 			break;
1567 		}
1568 		case IEEE80211_S_SCAN:
1569 			mutex_exit(&sc->sc_glock);
1570 			/* step to next channel before actual FW scan */
1571 			err = sc->sc_newstate(ic, nstate, arg);
1572 			mutex_enter(&sc->sc_glock);
1573 			if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1574 				cmn_err(CE_WARN,
1575 				    "could not initiate scan\n");
1576 				sc->sc_flags &= ~IWK_F_SCANNING;
1577 				ieee80211_cancel_scan(ic);
1578 			}
1579 			mutex_exit(&sc->sc_glock);
1580 			return (err);
1581 		default:
1582 			break;
1583 
1584 		}
1585 		sc->sc_clk = 0;
1586 		break;
1587 
1588 	case IEEE80211_S_AUTH:
1589 		if (ostate == IEEE80211_S_SCAN) {
1590 			sc->sc_flags &= ~IWK_F_SCANNING;
1591 		}
1592 
1593 		/* reset state to handle reassociations correctly */
1594 		sc->sc_config.assoc_id = 0;
1595 		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1596 
1597 		/*
1598 		 * before sending authentication and association request
1599 		 * frames, we need to set up the hardware, e.g. switch to
1600 		 * the same channel as the target AP.
1601 		 */
1602 		if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1603 			cmn_err(CE_WARN, "could not setup firmware for "
1604 			    "authentication\n");
1605 			mutex_exit(&sc->sc_glock);
1606 			return (err);
1607 		}
1608 		break;
1609 
1610 	case IEEE80211_S_RUN:
1611 		if (ostate == IEEE80211_S_SCAN) {
1612 			sc->sc_flags &= ~IWK_F_SCANNING;
1613 		}
1614 
1615 		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1616 			/* let LED blink when monitoring */
1617 			iwk_set_led(sc, 2, 10, 10);
1618 			break;
1619 		}
1620 		IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1621 
1622 		/* non-IBSS mode */
1623 		if (ic->ic_opmode != IEEE80211_M_IBSS) {
1624 			/* update adapter's configuration */
1625 			if (sc->sc_assoc_id != in->in_associd) {
1626 				cmn_err(CE_WARN,
1627 				    "association ID mismatch: expected %d, "
1628 				    "got %d\n",
1629 				    in->in_associd, sc->sc_assoc_id);
1630 			}
1631 			sc->sc_config.assoc_id = in->in_associd & 0x3fff;
1632 			/*
1633 			 * short preamble/slot time are
1634 			 * negotiated when associating
1635 			 */
1636 			sc->sc_config.flags &=
1637 			    ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
1638 			    RXON_FLG_SHORT_SLOT_MSK);
1639 
1640 			if (ic->ic_flags & IEEE80211_F_SHSLOT)
1641 				sc->sc_config.flags |=
1642 				    LE_32(RXON_FLG_SHORT_SLOT_MSK);
1643 
1644 			if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
1645 				sc->sc_config.flags |=
1646 				    LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
1647 
1648 			sc->sc_config.filter_flags |=
1649 			    LE_32(RXON_FILTER_ASSOC_MSK);
1650 
1651 			if (ic->ic_opmode != IEEE80211_M_STA)
1652 				sc->sc_config.filter_flags |=
1653 				    LE_32(RXON_FILTER_BCON_AWARE_MSK);
1654 
1655 			IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
1656 			    " filter_flags %x\n",
1657 			    sc->sc_config.chan, sc->sc_config.flags,
1658 			    sc->sc_config.filter_flags));
1659 			err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1660 			    sizeof (iwk_rxon_cmd_t), 1);
1661 			if (err != IWK_SUCCESS) {
1662 				cmn_err(CE_WARN, "could not update "
1663 				    "configuration\n");
1664 				mutex_exit(&sc->sc_glock);
1665 				return (err);
1666 			}
1667 		}
1668 
1669 		/* obtain current temperature of chipset */
1670 		sc->sc_tempera = iwk_curr_tempera(sc);
1671 
1672 		/*
1673 		 * perform Tx power calibration to determine
1674 		 * the DSP and radio gains
1675 		 */
1676 		err = iwk_tx_power_calibration(sc);
1677 		if (err) {
1678 			cmn_err(CE_WARN, "iwk_newstate(): "
1679 			    "failed to set tx power table\n");
			mutex_exit(&sc->sc_glock);
1680 			return (err);
1681 		}
1682 
1683 		/* start automatic rate control */
1684 		mutex_enter(&sc->sc_mt_lock);
1685 		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1686 			sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1687 			/* set rate to some reasonable initial value */
1688 			i = in->in_rates.ir_nrates - 1;
1689 			while (i > 0 && IEEE80211_RATE(i) > 72)
1690 				i--;
1691 			in->in_txrate = i;
1692 		} else {
1693 			sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1694 		}
1695 		mutex_exit(&sc->sc_mt_lock);
1696 
1697 		/* set LED on after associated */
1698 		iwk_set_led(sc, 2, 0, 1);
1699 		break;
1700 
1701 	case IEEE80211_S_INIT:
1702 		if (ostate == IEEE80211_S_SCAN) {
1703 			sc->sc_flags &= ~IWK_F_SCANNING;
1704 		}
1705 
1706 		/* set LED off after init */
1707 		iwk_set_led(sc, 2, 1, 0);
1708 		break;
1709 	case IEEE80211_S_ASSOC:
1710 		if (ostate == IEEE80211_S_SCAN) {
1711 			sc->sc_flags &= ~IWK_F_SCANNING;
1712 		}
1713 
1714 		break;
1715 	}
1716 
1717 	mutex_exit(&sc->sc_glock);
1718 
1719 	err = sc->sc_newstate(ic, nstate, arg);
1720 
1721 	if (nstate == IEEE80211_S_RUN) {
1722 
1723 		mutex_enter(&sc->sc_glock);
1724 
1725 		/*
1726 		 * initialize receiver sensitivity
1727 		 * calibration
1728 		 */
1729 		err = iwk_rx_sens_init(sc);
1730 		if (err) {
1731 			cmn_err(CE_WARN, "iwk_newstate(): "
1732 			    "failed to init RX sensitivity\n");
1733 			mutex_exit(&sc->sc_glock);
1734 			return (err);
1735 		}
1736 
1737 		/* initialize receiver gain balance calibration */
1738 		err = iwk_rxgain_diff_init(sc);
1739 		if (err) {
1740 			cmn_err(CE_WARN, "iwk_newstate(): "
1741 			    "failed to init phy calibration\n");
1742 			mutex_exit(&sc->sc_glock);
1743 			return (err);
1744 		}
1745 
1746 		mutex_exit(&sc->sc_glock);
1747 
1748 	}
1749 
1750 	return (err);
1751 }
1752 
1753 /*ARGSUSED*/
1754 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1755     const uint8_t mac[IEEE80211_ADDR_LEN])
1756 {
1757 	iwk_sc_t *sc = (iwk_sc_t *)ic;
1758 	iwk_add_sta_t node;
1759 	int err;
1760 
1761 	switch (k->wk_cipher->ic_cipher) {
1762 	case IEEE80211_CIPHER_WEP:
1763 	case IEEE80211_CIPHER_TKIP:
1764 		return (1); /* software does it. */
1765 	case IEEE80211_CIPHER_AES_CCM:
1766 		break;
1767 	default:
1768 		return (0);
1769 	}
1770 	sc->sc_config.filter_flags &= ~(RXON_FILTER_DIS_DECRYPT_MSK |
1771 	    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1772 
1773 	mutex_enter(&sc->sc_glock);
1774 
1775 	/* update ap/multicast node */
1776 	(void) memset(&node, 0, sizeof (node));
1777 	if (IEEE80211_IS_MULTICAST(mac)) {
1778 		(void) memset(node.bssid, 0xff, 6);
1779 		node.id = IWK_BROADCAST_ID;
1780 	} else {
1781 		IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1782 		node.id = IWK_AP_ID;
1783 	}
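	/*
	 * Keys flagged for transmit use the 802.11 key index directly;
	 * receive-only (group) keys are installed at index + 4 with bit
	 * 14 set in key_flags, presumably selecting separate firmware
	 * key slots.
	 */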
1784 	if (k->wk_flags & IEEE80211_KEY_XMIT) {
1785 		node.key_flags = 0;
1786 		node.keyp = k->wk_keyix;
1787 	} else {
1788 		node.key_flags = (1 << 14);
1789 		node.keyp = k->wk_keyix + 4;
1790 	}
1791 	(void) memcpy(node.key, k->wk_key, k->wk_keylen);
1792 	node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1793 	node.sta_mask = STA_MODIFY_KEY_MASK;
1794 	node.control = 1;
1795 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1796 	if (err != IWK_SUCCESS) {
1797 		cmn_err(CE_WARN, "iwk_key_set():"
1798 		    "failed to update ap node\n");
1799 		mutex_exit(&sc->sc_glock);
1800 		return (0);
1801 	}
1802 	mutex_exit(&sc->sc_glock);
1803 	return (1);
1804 }
1805 
1806 /*
1807  * exclusive access to mac begin.
1808  */
1809 static void
1810 iwk_mac_access_enter(iwk_sc_t *sc)
1811 {
1812 	uint32_t tmp;
1813 	int n;
1814 
1815 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
1816 	IWK_WRITE(sc, CSR_GP_CNTRL,
1817 	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1818 
1819 	/* wait until we succeed */
1820 	for (n = 0; n < 1000; n++) {
1821 		if ((IWK_READ(sc, CSR_GP_CNTRL) &
1822 		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1823 		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1824 		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1825 			break;
1826 		DELAY(10);
1827 	}
1828 	if (n == 1000)
1829 		IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1830 }
1831 
1832 /*
1833  * exclusive access to mac end.
1834  */
1835 static void
1836 iwk_mac_access_exit(iwk_sc_t *sc)
1837 {
1838 	uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1839 	IWK_WRITE(sc, CSR_GP_CNTRL,
1840 	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1841 }
1842 
1843 static uint32_t
1844 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1845 {
1846 	IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1847 	return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1848 }
1849 
1850 static void
1851 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1852 {
1853 	IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1854 	IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1855 }
1856 
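/*
 * The peripheral ("PRPH") registers below are accessed indirectly: the
 * target address is written to HBUS_TARG_PRPH_{R,W}ADDR and the data is
 * then transferred through HBUS_TARG_PRPH_{R,W}DAT.  The (3 << 24) in
 * the address presumably encodes a 4-byte access width.
 */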
1857 static uint32_t
1858 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1859 {
1860 	IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1861 	return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1862 }
1863 
1864 static void
1865 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1866 {
1867 	IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
1868 	IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
1869 }
1870 
1871 static void
1872 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
1873     uint32_t *data, int wlen)
1874 {
1875 	for (; wlen > 0; wlen--, data++, addr += 4)
1876 		iwk_reg_write(sc, addr, *data);
1877 }
1878 
1879 
1880 /*
1881  * ucode load/initialization steps:
1882  * 1)  load Bootstrap State Machine (BSM) with "bootstrap" uCode image.
1883  * BSM contains a small memory that *always* stays powered up, so it can
1884  * retain the bootstrap program even when the card is in a power-saving
1885  * power-down state.  The BSM loads the small program into ARC processor's
1886  * instruction memory when triggered by power-up.
1887  * 2)  load Initialize image via bootstrap program.
1888  * The Initialize image sets up regulatory and calibration data for the
1889  * Runtime/Protocol uCode. This sends a REPLY_ALIVE notification when completed.
1890  * The 4965 reply contains calibration data for temperature, voltage and tx gain
1891  * correction.
1892  */
1893 static int
1894 iwk_load_firmware(iwk_sc_t *sc)
1895 {
1896 	uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
1897 	uint32_t size = sc->sc_hdr->bootsz;
1898 	int n, err = IWK_SUCCESS;
1899 
1900 	/*
1901 	 * Tell the BSM the physical address bits [4:35] of the initialize
1902 	 * uCode.  The physical address of the runtime uCode will be set
1903 	 * for loading in the initialize alive notification interrupt.
1904 	 */
1905 	iwk_mac_access_enter(sc);
1906 
1907 	iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
1908 	    sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
1909 	iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
1910 	    sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
1911 	iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
1912 	    sc->sc_dma_fw_init_text.cookie.dmac_size);
1913 	iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
1914 	    sc->sc_dma_fw_init_data.cookie.dmac_size);
1915 
1916 	/* load bootstrap code into BSM memory */
1917 	iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
1918 	    size / sizeof (uint32_t));
1919 
1920 	iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
1921 	iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
1922 	iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
1923 
1924 	/*
1925 	 * prepare to load initialize uCode
1926 	 */
1927 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
1928 
1929 	/* wait while the adapter is busy loading the firmware */
1930 	for (n = 0; n < 1000; n++) {
1931 		if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
1932 		    BSM_WR_CTRL_REG_BIT_START))
1933 			break;
1934 		DELAY(10);
1935 	}
1936 	if (n == 1000) {
1937 		cmn_err(CE_WARN, "timeout transferring firmware\n");
1938 		err = ETIMEDOUT;
1939 		return (err);
1940 	}
1941 
1942 	/* for future power-save mode use */
1943 	iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
1944 
1945 	iwk_mac_access_exit(sc);
1946 
1947 	return (err);
1948 }
1949 
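/*
 * Receive a frame: parse the PHY result that precedes the frame,
 * derive an RSSI estimate from the per-antenna measurements, check
 * the length and CRC status bits, then pass the frame up to the
 * net80211 layer.
 */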
1950 /*ARGSUSED*/
1951 static void
1952 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
1953 {
1954 	ieee80211com_t *ic = &sc->sc_ic;
1955 	iwk_rx_ring_t *ring = &sc->sc_rxq;
1956 	iwk_rx_phy_res_t *stat;
1957 	ieee80211_node_t *in;
1958 	uint32_t *tail;
1959 	struct ieee80211_frame *wh;
1960 	mblk_t *mp;
1961 	uint16_t len, rssi, mrssi, agc;
1962 	int16_t t;
1963 	uint32_t ants, i;
1964 	struct iwk_rx_non_cfg_phy *phyinfo;
1965 
1966 	/* assuming no 11n here; 11n support will be handled in phase II */
1967 	stat = (iwk_rx_phy_res_t *)(desc + 1);
1968 	if (stat->cfg_phy_cnt > 20) {
1969 		return;
1970 	}
1971 
1972 	phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
1973 	agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
1974 	mrssi = 0;
1975 	ants = (stat->phy_flags & RX_PHY_FLAGS_ANTENNAE_MASK) >>
1976 	    RX_PHY_FLAGS_ANTENNAE_OFFSET;
1977 	for (i = 0; i < 3; i++) {
1978 		if (ants & (1 << i))
1979 			mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
1980 	}
1981 	t = mrssi - agc - 44; /* t is the dBm value */
1982 	/*
1983 	 * convert dBm to percentage ???
1984 	 */
1985 	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
1986 	    (75 * 75);
1987 	if (rssi > 100)
1988 		rssi = 100;
1989 	if (rssi < 1)
1990 		rssi = 1;
1991 	len = stat->byte_count;
1992 	tail = (uint32_t *)((uint8_t *)(stat + 1) + stat->cfg_phy_cnt + len);
1993 
1994 	IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
1995 	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
1996 	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
1997 	    len, stat->rate.r.s.rate, stat->channel,
1998 	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
1999 	    stat->cfg_phy_cnt, LE_32(*tail)));
2000 
2001 	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2002 		IWK_DBG((IWK_DEBUG_RX, "rx frame length out of range\n"));
2003 		return;
2004 	}
2005 
2006 	/*
2007 	 * discard Rx frames with bad CRC
2008 	 */
2009 	if ((LE_32(*tail) &
2010 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2011 	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2012 		IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2013 		    LE_32(*tail)));
2014 		sc->sc_rx_err++;
2015 		return;
2016 	}
2017 
2018 	wh = (struct ieee80211_frame *)
2019 	    ((uint8_t *)(stat + 1) + stat->cfg_phy_cnt);
2020 	if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2021 		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2022 		IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2023 		    sc->sc_assoc_id));
2024 	}
2025 #ifdef DEBUG
2026 	if (iwk_dbg_flags & IWK_DEBUG_RX)
2027 		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2028 #endif
2029 	in = ieee80211_find_rxnode(ic, wh);
2030 	mp = allocb(len, BPRI_MED);
2031 	if (mp) {
2032 		(void) memcpy(mp->b_wptr, wh, len);
2033 		mp->b_wptr += len;
2034 
2035 		/* send the frame to the 802.11 layer */
2036 		(void) ieee80211_input(ic, mp, in, rssi, 0);
2037 	} else {
2038 		sc->sc_rx_nobuf++;
2039 		IWK_DBG((IWK_DEBUG_RX,
2040 		    "iwk_rx_intr(): alloc rx buf failed\n"));
2041 	}
2042 	/* release node reference */
2043 	ieee80211_free_node(in);
2044 }
2045 
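/*
 * Tx completion: update the rate-control counters, return the slot
 * to the tx ring and resume MAC transmission if it was previously
 * flow-controlled.
 */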
2046 /*ARGSUSED*/
2047 static void
2048 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2049 {
2050 	ieee80211com_t *ic = &sc->sc_ic;
2051 	iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2052 	iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2053 	iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2054 
2055 	IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2056 	    " retries=%d frame_count=%x nkill=%d "
2057 	    "rate=%x duration=%d status=%x\n",
2058 	    desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2059 	    stat->bt_kill_count, stat->rate.r.s.rate,
2060 	    LE_32(stat->duration), LE_32(stat->status)));
2061 
2062 	amrr->txcnt++;
2063 	IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2064 	if (stat->ntries > 0) {
2065 		amrr->retrycnt++;
2066 		sc->sc_tx_retries++;
2067 		IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2068 		    sc->sc_tx_retries));
2069 	}
2070 
2071 	sc->sc_tx_timer = 0;
2072 
2073 	mutex_enter(&sc->sc_tx_lock);
2074 	ring->queued--;
2075 	if (ring->queued < 0)
2076 		ring->queued = 0;
2077 	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2078 		sc->sc_need_reschedule = 0;
2079 		mutex_exit(&sc->sc_tx_lock);
2080 		mac_tx_update(ic->ic_mach);
2081 		mutex_enter(&sc->sc_tx_lock);
2082 	}
2083 	mutex_exit(&sc->sc_tx_lock);
2084 }
2085 
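/*
 * Command completion notification: wake up the thread blocked in
 * iwk_cmd() waiting for a synchronous command to finish.
 */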
2086 static void
2087 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2088 {
2089 	if ((desc->hdr.qid & 7) != 4) {
2090 		return;
2091 	}
2092 	mutex_enter(&sc->sc_glock);
2093 	sc->sc_flags |= IWK_F_CMD_DONE;
2094 	cv_signal(&sc->sc_cmd_cv);
2095 	mutex_exit(&sc->sc_glock);
2096 	IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2097 	    "qid=%x idx=%d flags=%x type=0x%x\n",
2098 	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2099 	    desc->hdr.type));
2100 }
2101 
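/*
 * REPLY_ALIVE notification: either the initialize uCode or the
 * runtime uCode has started.  For the former, point the BSM at the
 * runtime image; for the latter, set up the tx scheduler (SCD) and
 * signal that the firmware is ready.
 */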
2102 static void
2103 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2104 {
2105 	uint32_t base, i;
2106 	struct iwk_alive_resp *ar =
2107 	    (struct iwk_alive_resp *)(desc + 1);
2108 
2109 	/* the microcontroller is ready */
2110 	IWK_DBG((IWK_DEBUG_FW,
2111 	    "microcode alive notification minor: %x major: %x type:"
2112 	    " %x subtype: %x\n",
2113 	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2114 
2115 	if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2116 		IWK_DBG((IWK_DEBUG_FW,
2117 		    "microcontroller initialization failed\n"));
2118 	}
2119 	if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2120 		IWK_DBG((IWK_DEBUG_FW,
2121 		    "initialization alive received.\n"));
2122 		(void) memcpy(&sc->sc_card_alive_init, ar,
2123 		    sizeof (struct iwk_init_alive_resp));
2124 		/* XXX get temperature */
2125 		iwk_mac_access_enter(sc);
2126 		iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2127 		    sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2128 		iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2129 		    sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2130 		iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2131 		    sc->sc_dma_fw_data.cookie.dmac_size);
2132 		iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2133 		    sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2134 		iwk_mac_access_exit(sc);
2135 	} else {
2136 		IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2137 		(void) memcpy(&sc->sc_card_alive_run, ar,
2138 		    sizeof (struct iwk_alive_resp));
2139 
2140 		/*
2141 		 * Init SCD related registers to make Tx work. XXX
2142 		 */
2143 		iwk_mac_access_enter(sc);
2144 
2145 		/* read sram address of data base */
2146 		sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2147 
2148 		/* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2149 		for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2150 		    i < 128; i += 4)
2151 			iwk_mem_write(sc, base + i, 0);
2152 
2153 		/* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2154 		for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2155 		    i < 256; i += 4)
2156 			iwk_mem_write(sc, base + i, 0);
2157 
2158 		/* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2159 		for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2160 		    i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2161 			iwk_mem_write(sc, base + i, 0);
2162 
2163 		iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2164 		    sc->sc_dma_sh.cookie.dmac_address >> 10);
2165 		iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2166 
2167 		/* initiate the tx queues */
2168 		for (i = 0; i < IWK_NUM_QUEUES; i++) {
2169 			iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2170 			IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2171 			iwk_mem_write(sc, sc->sc_scd_base +
2172 			    SCD_CONTEXT_QUEUE_OFFSET(i),
2173 			    (SCD_WIN_SIZE & 0x7f));
2174 			iwk_mem_write(sc, sc->sc_scd_base +
2175 			    SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2176 			    (SCD_FRAME_LIMIT & 0x7f) << 16);
2177 		}
2178 		/* enable interrupts on queues 0-7 */
2179 		iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2180 		    (1 << IWK_NUM_QUEUES) - 1);
2181 		/* enable each channel 0-7 */
2182 		iwk_reg_write(sc, SCD_TXFACT,
2183 		    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2184 		/*
2185 		 * queue 0-7 maps to FIFO 0-7 and
2186 		 * all queues work under FIFO mode (none-scheduler-ack)
2187 		 * all queues work in FIFO mode (non-scheduler-ack)
2188 		for (i = 0; i < 7; i++) {
2189 			iwk_reg_write(sc,
2190 			    SCD_QUEUE_STATUS_BITS(i),
2191 			    (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2192 			    (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2193 			    SCD_QUEUE_STTS_REG_MSK);
2194 		}
2195 		iwk_mac_access_exit(sc);
2196 
2197 		sc->sc_flags |= IWK_F_FW_INIT;
2198 		cv_signal(&sc->sc_fw_cv);
2199 	}
2200 
2201 }
2202 
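/*
 * Rx soft interrupt: drain the rx queue, dispatching each
 * notification (rx frames, tx completions, alive/scan/statistics
 * events) to its handler, then return the consumed entries to the
 * firmware.
 */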
2203 static uint_t
2204 /* LINTED: argument unused in function: unused */
2205 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2206 {
2207 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2208 	ieee80211com_t *ic = &sc->sc_ic;
2209 	iwk_rx_desc_t *desc;
2210 	iwk_rx_data_t *data;
2211 	uint32_t index;
2212 
2213 	mutex_enter(&sc->sc_glock);
2214 	if (sc->sc_rx_softint_pending != 1) {
2215 		mutex_exit(&sc->sc_glock);
2216 		return (DDI_INTR_UNCLAIMED);
2217 	}
2218 	/* disable interrupts */
2219 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2220 	mutex_exit(&sc->sc_glock);
2221 
2222 	/*
2223 	 * The firmware has advanced the index of the rx queue; fetch it
2224 	 * and process the new entries.
2225 	 */
2226 	index = LE_32(sc->sc_shared->val0) & 0xfff;
2227 
2228 	while (sc->sc_rxq.cur != index) {
2229 		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2230 		desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2231 
2232 		IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2233 		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2234 		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2235 		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2236 
2237 		/* a response other than rx/tx completes a pending command */
2238 		if (!(desc->hdr.qid & 0x80) &&
2239 		    (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2240 		    (desc->hdr.type != REPLY_TX) &&
2241 		    (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2242 		    (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2243 		    (desc->hdr.type != SENSITIVITY_CMD))
2244 			iwk_cmd_intr(sc, desc);
2245 
2246 		switch (desc->hdr.type) {
2247 		case REPLY_4965_RX:
2248 			iwk_rx_intr(sc, desc, data);
2249 			break;
2250 
2251 		case REPLY_TX:
2252 			iwk_tx_intr(sc, desc, data);
2253 			break;
2254 
2255 		case REPLY_ALIVE:
2256 			iwk_ucode_alive(sc, desc);
2257 			break;
2258 
2259 		case CARD_STATE_NOTIFICATION:
2260 		{
2261 			uint32_t *status = (uint32_t *)(desc + 1);
2262 
2263 			IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2264 			    LE_32(*status)));
2265 
2266 			if (LE_32(*status) & 1) {
2267 				/*
2268 				 * the radio switch has been turned OFF.
2269 				 * This is treated as a hw error;
2270 				 * iwk_thread() tries to recover once the
2271 				 * switch is turned back ON.
2272 				 */
2273 				cmn_err(CE_NOTE,
2274 				    "iwk_rx_softintr(): "
2275 				    "Radio transmitter is off\n");
2276 				sc->sc_ostate = sc->sc_ic.ic_state;
2277 				ieee80211_new_state(&sc->sc_ic,
2278 				    IEEE80211_S_INIT, -1);
2279 				sc->sc_flags |=
2280 				    (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2281 			}
2282 			break;
2283 		}
2284 		case SCAN_START_NOTIFICATION:
2285 		{
2286 			iwk_start_scan_t *scan =
2287 			    (iwk_start_scan_t *)(desc + 1);
2288 
2289 			IWK_DBG((IWK_DEBUG_SCAN,
2290 			    "scanning channel %d status %x\n",
2291 			    scan->chan, LE_32(scan->status)));
2292 
2293 			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2294 			break;
2295 		}
2296 		case SCAN_COMPLETE_NOTIFICATION:
2297 		{
2298 			iwk_stop_scan_t *scan =
2299 			    (iwk_stop_scan_t *)(desc + 1);
2300 
2301 			IWK_DBG((IWK_DEBUG_SCAN,
2302 			    "completed channel %d (burst of %d) status %02x\n",
2303 			    scan->chan, scan->nchan, scan->status));
2304 
2305 			sc->sc_scan_pending++;
2306 			break;
2307 		}
2308 		case STATISTICS_NOTIFICATION:
2309 			/* handle statistics notification */
2310 			iwk_statistics_notify(sc, desc);
2311 			break;
2312 		}
2313 
2314 		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2315 	}
2316 
2317 	/*
2318 	 * The driver has processed everything received in the rx queue;
2319 	 * tell the firmware the updated read pointer.
2320 	 */
2321 	index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2322 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2323 
2324 	mutex_enter(&sc->sc_glock);
2325 	/* re-enable interrupts */
2326 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2327 	sc->sc_rx_softint_pending = 0;
2328 	mutex_exit(&sc->sc_glock);
2329 
2330 	return (DDI_INTR_CLAIMED);
2331 }
2332 
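/*
 * Hard interrupt handler: acknowledge the interrupt causes, trigger
 * the rx soft interrupt when receive work is pending, and start
 * error recovery on a fatal firmware error.
 */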
2333 static uint_t
2334 /* LINTED: argument unused in function: unused */
2335 iwk_intr(caddr_t arg, caddr_t unused)
2336 {
2337 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2338 	uint32_t r, rfh;
2339 
2340 	mutex_enter(&sc->sc_glock);
2341 
2342 	if (sc->sc_flags & IWK_F_SUSPEND) {
2343 		mutex_exit(&sc->sc_glock);
2344 		return (DDI_INTR_UNCLAIMED);
2345 	}
2346 
2347 	r = IWK_READ(sc, CSR_INT);
2348 	if (r == 0 || r == 0xffffffff) {
2349 		mutex_exit(&sc->sc_glock);
2350 		return (DDI_INTR_UNCLAIMED);
2351 	}
2352 
2353 	IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2354 
2355 	rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2356 	IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2357 	/* disable interrupts */
2358 	IWK_WRITE(sc, CSR_INT_MASK, 0);
2359 	/* ack interrupts */
2360 	IWK_WRITE(sc, CSR_INT, r);
2361 	IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2362 
2363 	if (sc->sc_soft_hdl == NULL) {
2364 		mutex_exit(&sc->sc_glock);
2365 		return (DDI_INTR_CLAIMED);
2366 	}
2367 	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2368 		cmn_err(CE_WARN, "fatal firmware error\n");
2369 		mutex_exit(&sc->sc_glock);
2370 #ifdef DEBUG
2371 		/* dump event and error logs to dmesg */
2372 		iwk_write_error_log(sc);
2373 		iwk_write_event_log(sc);
2374 #endif /* DEBUG */
2375 		iwk_stop(sc);
2376 		sc->sc_ostate = sc->sc_ic.ic_state;
2377 		ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2378 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2379 		return (DDI_INTR_CLAIMED);
2380 	}
2381 
2382 	if (r & BIT_INT_RF_KILL) {
2383 		IWK_DBG((IWK_DEBUG_RADIO, "RF kill\n"));
2384 	}
2385 
2386 	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2387 	    (rfh & FH_INT_RX_MASK)) {
2388 		sc->sc_rx_softint_pending = 1;
2389 		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2390 	}
2391 
2392 	if (r & BIT_INT_ALIVE)	{
2393 		IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2394 	}
2395 
2396 	/* re-enable interrupts */
2397 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2398 	mutex_exit(&sc->sc_glock);
2399 
2400 	return (DDI_INTR_CLAIMED);
2401 }
2402 
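/*
 * Map an 802.11 rate (in 500Kbps units) to the rate/PLCP code the
 * firmware expects in the rate_n_flags field.
 */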
2403 static uint8_t
2404 iwk_rate_to_plcp(int rate)
2405 {
2406 	uint8_t ret;
2407 
2408 	switch (rate) {
2409 	/* CCK rates */
2410 	case 2:
2411 		ret = 0xa;
2412 		break;
2413 	case 4:
2414 		ret = 0x14;
2415 		break;
2416 	case 11:
2417 		ret = 0x37;
2418 		break;
2419 	case 22:
2420 		ret = 0x6e;
2421 		break;
2422 	/* OFDM rates */
2423 	case 12:
2424 		ret = 0xd;
2425 		break;
2426 	case 18:
2427 		ret = 0xf;
2428 		break;
2429 	case 24:
2430 		ret = 0x5;
2431 		break;
2432 	case 36:
2433 		ret = 0x7;
2434 		break;
2435 	case 48:
2436 		ret = 0x9;
2437 		break;
2438 	case 72:
2439 		ret = 0xb;
2440 		break;
2441 	case 96:
2442 		ret = 0x1;
2443 		break;
2444 	case 108:
2445 		ret = 0x3;
2446 		break;
2447 	default:
2448 		ret = 0;
2449 		break;
2450 	}
2451 	return (ret);
2452 }
2453 
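/*
 * GLDv3 transmit entry point: hand each frame in the chain to
 * iwk_send() and return the unsent remainder so MAC can reschedule;
 * frames are dropped when the interface is suspended or not in the
 * RUN state.
 */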
2454 static mblk_t *
2455 iwk_m_tx(void *arg, mblk_t *mp)
2456 {
2457 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2458 	ieee80211com_t	*ic = &sc->sc_ic;
2459 	mblk_t			*next;
2460 
2461 	if (sc->sc_flags & IWK_F_SUSPEND) {
2462 		freemsgchain(mp);
2463 		return (NULL);
2464 	}
2465 
2466 	if (ic->ic_state != IEEE80211_S_RUN) {
2467 		freemsgchain(mp);
2468 		return (NULL);
2469 	}
2470 
2471 	while (mp != NULL) {
2472 		next = mp->b_next;
2473 		mp->b_next = NULL;
2474 		if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2475 			mp->b_next = next;
2476 			break;
2477 		}
2478 		mp = next;
2479 	}
2480 	return (mp);
2481 }
2482 
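/*
 * Build a tx command plus the 802.11 frame in the data ring and
 * kick the hardware.  The frame is copied into one contiguous
 * buffer, optionally encrypted (CCMP keys are handed to the
 * firmware), and described by a two-segment TFD.
 */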
2483 /* ARGSUSED */
2484 static int
2485 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2486 {
2487 	iwk_sc_t *sc = (iwk_sc_t *)ic;
2488 	iwk_tx_ring_t *ring;
2489 	iwk_tx_desc_t *desc;
2490 	iwk_tx_data_t *data;
2491 	iwk_cmd_t *cmd;
2492 	iwk_tx_cmd_t *tx;
2493 	ieee80211_node_t *in;
2494 	struct ieee80211_frame *wh;
2495 	struct ieee80211_key *k = NULL;
2496 	mblk_t *m, *m0;
2497 	int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2498 	uint16_t masks = 0;
2499 
2500 	ring = &sc->sc_txq[0];
2501 	data = &ring->data[ring->cur];
2502 	desc = data->desc;
2503 	cmd = data->cmd;
2504 	bzero(desc, sizeof (*desc));
2505 	bzero(cmd, sizeof (*cmd));
2506 
2507 	mutex_enter(&sc->sc_tx_lock);
2508 	if (sc->sc_flags & IWK_F_SUSPEND) {
2509 		mutex_exit(&sc->sc_tx_lock);
2510 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2511 		    IEEE80211_FC0_TYPE_DATA) {
2512 			freemsg(mp);
2513 		}
2514 		err = IWK_FAIL;
2515 		goto exit;
2516 	}
2517 
2518 	if (ring->queued > ring->count - 64) {
2519 		IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2520 		sc->sc_need_reschedule = 1;
2521 		mutex_exit(&sc->sc_tx_lock);
2522 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2523 		    IEEE80211_FC0_TYPE_DATA) {
2524 			freemsg(mp);
2525 		}
2526 		sc->sc_tx_nobuf++;
2527 		err = IWK_FAIL;
2528 		goto exit;
2529 	}
2530 	mutex_exit(&sc->sc_tx_lock);
2531 
2532 	hdrlen = sizeof (struct ieee80211_frame);
2533 
2534 	m = allocb(msgdsize(mp) + 32, BPRI_MED);
2535 	if (m == NULL) { /* cannot allocate buffer, drop this packet */
2536 		cmn_err(CE_WARN,
2537 		    "iwk_send(): failed to allocate msgbuf\n");
2538 		freemsg(mp);
2539 		err = IWK_SUCCESS;
2540 		goto exit;
2541 	}
2542 	for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2543 		mblen = MBLKL(m0);
2544 		(void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2545 		off += mblen;
2546 	}
2547 	m->b_wptr += off;
2548 	freemsg(mp);
2549 
2550 	wh = (struct ieee80211_frame *)m->b_rptr;
2551 
2552 	in = ieee80211_find_txnode(ic, wh->i_addr1);
2553 	if (in == NULL) {
2554 		cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2555 		freemsg(m);
2556 		sc->sc_tx_err++;
2557 		err = IWK_SUCCESS;
2558 		goto exit;
2559 	}
2560 	(void) ieee80211_encap(ic, m, in);
2561 
2562 	cmd->hdr.type = REPLY_TX;
2563 	cmd->hdr.flags = 0;
2564 	cmd->hdr.qid = ring->qid;
2565 	cmd->hdr.idx = ring->cur;
2566 
2567 	tx = (iwk_tx_cmd_t *)cmd->data;
2568 	tx->tx_flags = 0;
2569 
2570 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2571 		tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2572 	} else {
2573 		tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2574 	}
2575 
2576 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2577 		k = ieee80211_crypto_encap(ic, m);
2578 		if (k == NULL) {
2579 			freemsg(m);
2580 			sc->sc_tx_err++;
2581 			err = IWK_SUCCESS;
2582 			goto exit;
2583 		}
2584 
2585 		if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2586 			tx->sec_ctl = 2; /* for CCMP */
2587 			tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2588 			(void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2589 		}
2590 
2591 		/* packet header may have moved, reset our local pointer */
2592 		wh = (struct ieee80211_frame *)m->b_rptr;
2593 	}
2594 
2595 	len = msgdsize(m);
2596 
2597 #ifdef DEBUG
2598 	if (iwk_dbg_flags & IWK_DEBUG_TX)
2599 		ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2600 #endif
2601 
2602 	/* pickup a rate */
2603 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2604 	    IEEE80211_FC0_TYPE_MGT) {
2605 		/* mgmt frames are sent at the lowest rate (1M) */
2606 		rate = in->in_rates.ir_rates[0];
2607 	} else {
2608 		/*
2609 		 * rate selection is done here in software for now;
2610 		 * hardware rate scaling may be added later.
2611 		 * that might look like the following, for management frames:
2612 		 * tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
2613 		 * and for data frames:
2614 		 * tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
2615 		 * rate = in->in_rates.ir_rates[in->in_txrate];
2616 		 * tx->initial_rate_index = 1;
2617 		 *
2618 		 * for now the tx rate is determined in the tx cmd flags,
2619 		 * up to the max of 54M for 11g and 11M for 11b.
2620 		 */
2621 
2622 		if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2623 			rate = ic->ic_fixed_rate;
2624 		} else {
2625 			rate = in->in_rates.ir_rates[in->in_txrate];
2626 		}
2627 	}
2628 	rate &= IEEE80211_RATE_VAL;
2629 	IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2630 	    in->in_txrate, in->in_rates.ir_nrates, rate));
2631 
2632 	tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2633 
2634 	len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2635 	if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2636 		tx->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2637 
2638 	/* retrieve destination node's id */
2639 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2640 		tx->sta_id = IWK_BROADCAST_ID;
2641 	} else {
2642 		if (ic->ic_opmode != IEEE80211_M_IBSS)
2643 			tx->sta_id = IWK_AP_ID;
2644 	}
2645 
2646 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2647 	    IEEE80211_FC0_TYPE_MGT) {
2648 		/* tell h/w to set timestamp in probe responses */
2649 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2650 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2651 			tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2652 
2653 		if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2654 		    IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2655 		    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2656 		    IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2657 			tx->timeout.pm_frame_timeout = 3;
2658 		else
2659 			tx->timeout.pm_frame_timeout = 2;
2660 	} else
2661 		tx->timeout.pm_frame_timeout = 0;
2662 	if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2663 		masks |= RATE_MCS_CCK_MSK;
2664 
2665 	masks |= RATE_MCS_ANT_B_MSK;
2666 	tx->rate.r.rate_n_flags = (iwk_rate_to_plcp(rate) | masks);
2667 
2668 	IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2669 	    tx->tx_flags));
2670 
2671 	tx->rts_retry_limit = 60;
2672 	tx->data_retry_limit = 15;
2673 
2674 	tx->stop_time.life_time  = LE_32(0xffffffff);
2675 
2676 	tx->len = LE_16(len);
2677 
2678 	tx->dram_lsb_ptr =
2679 	    data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch);
2680 	tx->dram_msb_ptr = 0;
2681 	tx->driver_txop = 0;
2682 	tx->next_frame_len = 0;
2683 
2684 	(void) memcpy(tx + 1, m->b_rptr, hdrlen);
2685 	m->b_rptr += hdrlen;
2686 	(void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2687 
2688 	IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2689 	    ring->qid, ring->cur, len));
2690 
2691 	/*
2692 	 * first segment includes the tx cmd plus the 802.11 header,
2693 	 * the second includes the remainder of the 802.11 frame.
2694 	 */
2695 	desc->val0 = LE_32(2 << 24);
2696 	desc->pa[0].tb1_addr = LE_32(data->paddr_cmd);
2697 	desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2698 	    ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2699 	desc->pa[0].val2 =
2700 	    ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2701 	    ((len - hdrlen) << 20);
2702 	IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2703 	    "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2704 	    data->paddr_cmd, data->dma_data.cookie.dmac_address,
2705 	    len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
2706 
2707 	mutex_enter(&sc->sc_tx_lock);
2708 	ring->queued++;
2709 	mutex_exit(&sc->sc_tx_lock);
2710 
2711 	/* kick ring */
2712 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2713 	    tfd_offset[ring->cur].val = 8 + len;
2714 	if (ring->cur < IWK_MAX_WIN_SIZE) {
2715 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2716 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2717 	}
2718 
2719 	IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2720 	IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2721 
2722 	ring->cur = (ring->cur + 1) % ring->count;
2723 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2724 	freemsg(m);
2725 	/* release node reference */
2726 	ieee80211_free_node(in);
2727 
2728 	ic->ic_stats.is_tx_bytes += len;
2729 	ic->ic_stats.is_tx_frags++;
2730 
2731 	if (sc->sc_tx_timer == 0)
2732 		sc->sc_tx_timer = 10;
2733 exit:
2734 	return (err);
2735 }
2736 
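/*
 * GLDv3 ioctl entry point: pass WiFi ioctls to net80211 and restart
 * the interface when a changed setting requires it (ENETRESET).
 */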
2737 static void
2738 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2739 {
2740 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2741 	ieee80211com_t	*ic = &sc->sc_ic;
2742 	int		err;
2743 
2744 	err = ieee80211_ioctl(ic, wq, mp);
2745 
2746 	if (err == ENETRESET) {
2747 		/*
2748 		 * This is special-cased for hidden AP connections.
2749 		 * We must make sure only one 'scan' is issued by the
2750 		 * driver for a 'connect' CLI command.  So when
2751 		 * connecting to a hidden AP, the scan is only sent
2752 		 * over the air once the desired ESSID of the AP we
2753 		 * want to connect to is known.
2754 		 */
2755 		if (ic->ic_des_esslen) {
2756 			if (sc->sc_flags & IWK_F_RUNNING) {
2757 				iwk_m_stop(sc);
2758 				(void) iwk_m_start(sc);
2759 				(void) ieee80211_new_state(ic,
2760 				    IEEE80211_S_SCAN, -1);
2761 			}
2762 		}
2763 	}
2764 }
2765 
2766 /*
2767  * callback functions for set/get properties
2768  */
2769 /* ARGSUSED */
2770 static int
2771 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2772     uint_t pr_flags, uint_t wldp_length, void *wldp_buf, uint_t *perm)
2773 {
2774 	int		err = 0;
2775 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2776 
2777 	err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
2778 	    pr_flags, wldp_length, wldp_buf, perm);
2779 
2780 	return (err);
2781 }
2782 static int
2783 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2784     uint_t wldp_length, const void *wldp_buf)
2785 {
2786 	int		err;
2787 	iwk_sc_t	*sc = (iwk_sc_t *)arg;
2788 	ieee80211com_t	*ic = &sc->sc_ic;
2789 
2790 	err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
2791 	    wldp_buf);
2792 
2793 	if (err == ENETRESET) {
2794 		if (ic->ic_des_esslen) {
2795 			if (sc->sc_flags & IWK_F_RUNNING) {
2796 				iwk_m_stop(sc);
2797 				(void) iwk_m_start(sc);
2798 				(void) ieee80211_new_state(ic,
2799 				    IEEE80211_S_SCAN, -1);
2800 			}
2801 		}
2802 		err = 0;
2803 	}
2804 
2805 	return (err);
2806 }
2807 
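/*
 * GLDv3 statistics callback: report the counters kept by the driver
 * and defer the remaining WiFi statistics to net80211.
 */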
2808 /*ARGSUSED*/
2809 static int
2810 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
2811 {
2812 	iwk_sc_t	*sc  = (iwk_sc_t *)arg;
2813 	ieee80211com_t	*ic = &sc->sc_ic;
2814 	ieee80211_node_t *in = ic->ic_bss;
2815 	struct ieee80211_rateset *rs = &in->in_rates;
2816 
2817 	mutex_enter(&sc->sc_glock);
2818 	switch (stat) {
2819 	case MAC_STAT_IFSPEED:
2820 		*val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
2821 		    (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL)
2822 		    : ic->ic_fixed_rate) /2 * 1000000;
2823 		break;
2824 	case MAC_STAT_NOXMTBUF:
2825 		*val = sc->sc_tx_nobuf;
2826 		break;
2827 	case MAC_STAT_NORCVBUF:
2828 		*val = sc->sc_rx_nobuf;
2829 		break;
2830 	case MAC_STAT_IERRORS:
2831 		*val = sc->sc_rx_err;
2832 		break;
2833 	case MAC_STAT_RBYTES:
2834 		*val = ic->ic_stats.is_rx_bytes;
2835 		break;
2836 	case MAC_STAT_IPACKETS:
2837 		*val = ic->ic_stats.is_rx_frags;
2838 		break;
2839 	case MAC_STAT_OBYTES:
2840 		*val = ic->ic_stats.is_tx_bytes;
2841 		break;
2842 	case MAC_STAT_OPACKETS:
2843 		*val = ic->ic_stats.is_tx_frags;
2844 		break;
2845 	case MAC_STAT_OERRORS:
2846 	case WIFI_STAT_TX_FAILED:
2847 		*val = sc->sc_tx_err;
2848 		break;
2849 	case WIFI_STAT_TX_RETRANS:
2850 		*val = sc->sc_tx_retries;
2851 		break;
2852 	case WIFI_STAT_FCS_ERRORS:
2853 	case WIFI_STAT_WEP_ERRORS:
2854 	case WIFI_STAT_TX_FRAGS:
2855 	case WIFI_STAT_MCAST_TX:
2856 	case WIFI_STAT_RTS_SUCCESS:
2857 	case WIFI_STAT_RTS_FAILURE:
2858 	case WIFI_STAT_ACK_FAILURE:
2859 	case WIFI_STAT_RX_FRAGS:
2860 	case WIFI_STAT_MCAST_RX:
2861 	case WIFI_STAT_RX_DUPS:
2862 		mutex_exit(&sc->sc_glock);
2863 		return (ieee80211_stat(ic, stat, val));
2864 	default:
2865 		mutex_exit(&sc->sc_glock);
2866 		return (ENOTSUP);
2867 	}
2868 	mutex_exit(&sc->sc_glock);
2869 
2870 	return (IWK_SUCCESS);
2871 
2872 }
2873 
2874 static int
2875 iwk_m_start(void *arg)
2876 {
2877 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2878 	ieee80211com_t	*ic = &sc->sc_ic;
2879 	int err;
2880 
2881 	err = iwk_init(sc);
2882 
2883 	if (err != IWK_SUCCESS) {
2884 		/*
2885 		 * The hw init failed (e.g. RF is OFF). Return success so
2886 		 * that the 'plumb' succeeds; iwk_thread() retries the
2887 		 * initialization in the background.
2888 		 */
2889 		cmn_err(CE_WARN, "iwk_m_start(): failed to initialize "
2890 		    "hardware\n");
2891 		mutex_enter(&sc->sc_glock);
2892 		sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2893 		mutex_exit(&sc->sc_glock);
2894 		return (IWK_SUCCESS);
2895 	}
2896 
2897 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2898 
2899 	mutex_enter(&sc->sc_glock);
2900 	sc->sc_flags |= IWK_F_RUNNING;
2901 	mutex_exit(&sc->sc_glock);
2902 
2903 	return (IWK_SUCCESS);
2904 }
2905 
2906 static void
2907 iwk_m_stop(void *arg)
2908 {
2909 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2910 	ieee80211com_t	*ic = &sc->sc_ic;
2911 
2912 	iwk_stop(sc);
2913 	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2914 	mutex_enter(&sc->sc_mt_lock);
2915 	sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
2916 	sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
2917 	mutex_exit(&sc->sc_mt_lock);
2918 	mutex_enter(&sc->sc_glock);
2919 	sc->sc_flags &= ~IWK_F_RUNNING;
2920 	sc->sc_flags &= ~IWK_F_SCANNING;
2921 	mutex_exit(&sc->sc_glock);
2922 }
2923 
2924 /*ARGSUSED*/
2925 static int
2926 iwk_m_unicst(void *arg, const uint8_t *macaddr)
2927 {
2928 	iwk_sc_t *sc = (iwk_sc_t *)arg;
2929 	ieee80211com_t	*ic = &sc->sc_ic;
2930 	int err;
2931 
2932 	if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
2933 		IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
2934 		mutex_enter(&sc->sc_glock);
2935 		err = iwk_config(sc);
2936 		mutex_exit(&sc->sc_glock);
2937 		if (err != IWK_SUCCESS) {
2938 			cmn_err(CE_WARN,
2939 			    "iwk_m_unicst(): "
2940 			    "failed to configure device\n");
2941 			goto fail;
2942 		}
2943 	}
2944 	return (IWK_SUCCESS);
2945 fail:
2946 	return (err);
2947 }
2948 
2949 /*ARGSUSED*/
2950 static int
2951 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
2952 {
2953 	return (IWK_SUCCESS);
2954 }
2955 
2956 /*ARGSUSED*/
2957 static int
2958 iwk_m_promisc(void *arg, boolean_t on)
2959 {
2960 	return (IWK_SUCCESS);
2961 }
2962 
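/*
 * Driver monitoring thread: tracks the RF kill switch, recovers
 * from fatal hardware/firmware errors, paces active scans and runs
 * the periodic rate-control (AMRR) update.
 */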
2963 static void
2964 iwk_thread(iwk_sc_t *sc)
2965 {
2966 	ieee80211com_t	*ic = &sc->sc_ic;
2967 	clock_t clk;
2968 	int times = 0, err, n = 0, timeout = 0;
2969 	uint32_t tmp;
2970 
2971 	mutex_enter(&sc->sc_mt_lock);
2972 	while (sc->sc_mf_thread_switch) {
2973 		tmp = IWK_READ(sc, CSR_GP_CNTRL);
2974 		if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
2975 			sc->sc_flags &= ~IWK_F_RADIO_OFF;
2976 		} else {
2977 			sc->sc_flags |= IWK_F_RADIO_OFF;
2978 		}
2979 		/*
2980 		 * If in SUSPEND or the RF is OFF, do nothing
2981 		 */
2982 		if ((sc->sc_flags & IWK_F_SUSPEND) ||
2983 		    (sc->sc_flags & IWK_F_RADIO_OFF)) {
2984 			mutex_exit(&sc->sc_mt_lock);
2985 			delay(drv_usectohz(100000));
2986 			mutex_enter(&sc->sc_mt_lock);
2987 			continue;
2988 		}
2989 
2990 		/*
2991 		 * recover from a fatal error
2992 		 */
2993 		if (ic->ic_mach &&
2994 		    (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
2995 
2996 			IWK_DBG((IWK_DEBUG_FW,
2997 			    "iwk_thread(): "
2998 			    "try to recover fatal hw error: %d\n", times++));
2999 
3000 			iwk_stop(sc);
3001 
3002 			mutex_exit(&sc->sc_mt_lock);
3003 			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3004 			delay(drv_usectohz(2000000 + n*500000));
3005 			mutex_enter(&sc->sc_mt_lock);
3006 
3007 			err = iwk_init(sc);
3008 			if (err != IWK_SUCCESS) {
3009 				n++;
3010 				if (n < 20)
3011 					continue;
3012 			}
3013 			n = 0;
3014 			if (!err)
3015 				sc->sc_flags |= IWK_F_RUNNING;
3016 			sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3017 			mutex_exit(&sc->sc_mt_lock);
3018 			delay(drv_usectohz(2000000));
3019 			if (sc->sc_ostate != IEEE80211_S_INIT)
3020 				ieee80211_new_state(ic, IEEE80211_S_SCAN, 0);
3021 			mutex_enter(&sc->sc_mt_lock);
3022 		}
3023 
3024 		if (ic->ic_mach &&
3025 		    (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3026 
3027 			IWK_DBG((IWK_DEBUG_SCAN,
3028 			    "iwk_thread(): "
3029 			    "wait for probe response\n"));
3030 
3031 			sc->sc_scan_pending--;
3032 			mutex_exit(&sc->sc_mt_lock);
3033 			delay(drv_usectohz(200000));
3034 			ieee80211_next_scan(ic);
3035 			mutex_enter(&sc->sc_mt_lock);
3036 		}
3037 
3038 		/*
3039 		 * rate ctl
3040 		 */
3041 		if (ic->ic_mach &&
3042 		    (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3043 			clk = ddi_get_lbolt();
3044 			if (clk > sc->sc_clk + drv_usectohz(500000)) {
3045 				iwk_amrr_timeout(sc);
3046 			}
3047 		}
3048 
3049 		mutex_exit(&sc->sc_mt_lock);
3050 		delay(drv_usectohz(100000));
3051 		mutex_enter(&sc->sc_mt_lock);
3052 
3053 		if (sc->sc_tx_timer) {
3054 			timeout++;
3055 			if (timeout == 10) {
3056 				sc->sc_tx_timer--;
3057 				if (sc->sc_tx_timer == 0) {
3058 					sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3059 					sc->sc_ostate = IEEE80211_S_RUN;
3060 					IWK_DBG((IWK_DEBUG_FW,
3061 					    "iwk_thread(): try to recover from"
3062 					    " 'send fail'\n"));
3063 				}
3064 				timeout = 0;
3065 			}
3066 		}
3067 
3068 	}
3069 	sc->sc_mf_thread = NULL;
3070 	cv_signal(&sc->sc_mt_cv);
3071 	mutex_exit(&sc->sc_mt_lock);
3072 }
3073 
3074 
3075 /*
3076  * Send a command to the firmware.
3077  */
3078 static int
3079 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3080 {
3081 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3082 	iwk_tx_desc_t *desc;
3083 	iwk_cmd_t *cmd;
3084 	clock_t clk;
3085 
3086 	ASSERT(size <= sizeof (cmd->data));
3087 	ASSERT(mutex_owned(&sc->sc_glock));
3088 
3089 	IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3090 	desc = ring->data[ring->cur].desc;
3091 	cmd = ring->data[ring->cur].cmd;
3092 
3093 	cmd->hdr.type = (uint8_t)code;
3094 	cmd->hdr.flags = 0;
3095 	cmd->hdr.qid = ring->qid;
3096 	cmd->hdr.idx = ring->cur;
3097 	(void) memcpy(cmd->data, buf, size);
3098 	(void) memset(desc, 0, sizeof (*desc));
3099 
3100 	desc->val0 = LE_32(1 << 24);
3101 	desc->pa[0].tb1_addr =
3102 	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3103 	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3104 
3105 	/* kick cmd ring XXX */
3106 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3107 	    tfd_offset[ring->cur].val = 8;
3108 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3109 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3110 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3111 	}
3112 	ring->cur = (ring->cur + 1) % ring->count;
3113 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3114 
3115 	if (async)
3116 		return (IWK_SUCCESS);
3117 	else {
3118 		sc->sc_flags &= ~IWK_F_CMD_DONE;
3119 		clk = ddi_get_lbolt() + drv_usectohz(2000000);
3120 		while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3121 			if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3122 			    0)
3123 				break;
3124 		}
3125 		if (sc->sc_flags & IWK_F_CMD_DONE)
3126 			return (IWK_SUCCESS);
3127 		else
3128 			return (IWK_FAIL);
3129 	}
3130 }
3131 
3132 static void
3133 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3134 {
3135 	iwk_led_cmd_t led;
3136 
3137 	led.interval = LE_32(100000);	/* unit: 100ms */
3138 	led.id = id;
3139 	led.off = off;
3140 	led.on = on;
3141 
3142 	(void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3143 }
3144 
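/*
 * Prepare the hardware for authentication/association: reprogram
 * RXON with the target AP's channel, rates and BSSID, recalibrate
 * tx power, add the AP station entry and set up the link quality
 * (rate retry) table.
 */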
3145 static int
3146 iwk_hw_set_before_auth(iwk_sc_t *sc)
3147 {
3148 	ieee80211com_t *ic = &sc->sc_ic;
3149 	ieee80211_node_t *in = ic->ic_bss;
3150 	iwk_add_sta_t node;
3151 	iwk_link_quality_cmd_t link_quality;
3152 	struct ieee80211_rateset rs;
3153 	uint16_t masks = 0, rate;
3154 	int i, err;
3155 
3156 	/* update adapter's configuration according to the target AP's info */
3157 	IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3158 	sc->sc_config.chan = ieee80211_chan2ieee(ic, in->in_chan);
3159 	if (ic->ic_curmode == IEEE80211_MODE_11B) {
3160 		sc->sc_config.cck_basic_rates  = 0x03;
3161 		sc->sc_config.ofdm_basic_rates = 0;
3162 	} else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3163 	    (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3164 		sc->sc_config.cck_basic_rates  = 0;
3165 		sc->sc_config.ofdm_basic_rates = 0x15;
3166 	} else { /* assume 802.11b/g */
3167 		sc->sc_config.cck_basic_rates  = 0x0f;
3168 		sc->sc_config.ofdm_basic_rates = 0xff;
3169 	}
3170 
3171 	sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3172 	    RXON_FLG_SHORT_SLOT_MSK);
3173 
3174 	if (ic->ic_flags & IEEE80211_F_SHSLOT)
3175 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3176 	else
3177 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3178 
3179 	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3180 		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3181 	else
3182 		sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3183 
3184 	IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3185 	    "filter_flags %x  cck %x ofdm %x"
3186 	    " bssid:%02x:%02x:%02x:%02x:%02x:%02x\n",
3187 	    sc->sc_config.chan, sc->sc_config.flags,
3188 	    sc->sc_config.filter_flags,
3189 	    sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3190 	    sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3191 	    sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3192 	    sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3193 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3194 	    sizeof (iwk_rxon_cmd_t), 1);
3195 	if (err != IWK_SUCCESS) {
3196 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3197 		    " failed to config chan%d\n",
3198 		    sc->sc_config.chan);
3199 		return (err);
3200 	}
3201 
3202 	/* obtain current temperature of chipset */
3203 	sc->sc_tempera = iwk_curr_tempera(sc);
3204 
3205 	/* perform Tx power calibration to determine the DSP and radio gains */
3206 	err = iwk_tx_power_calibration(sc);
3207 	if (err) {
3208 		cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3209 		    "failed to set tx power table\n");
3210 		return (err);
3211 	}
3212 
3213 	/* add default AP node */
3214 	(void) memset(&node, 0, sizeof (node));
3215 	IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3216 	node.id = IWK_AP_ID;
3217 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3218 	if (err != IWK_SUCCESS) {
3219 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3220 		    "failed to add BSS node\n");
3221 		return (err);
3222 	}
3223 
3224 	/* TX_LINK_QUALITY cmd ? */
3225 	(void) memset(&link_quality, 0, sizeof (link_quality));
3226 	rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3227 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3228 		if (i < rs.ir_nrates)
3229 			rate = rs.ir_rates[rs.ir_nrates - i];
3230 		else
3231 			rate = 2;
3232 		if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3233 			masks |= RATE_MCS_CCK_MSK;
3234 		masks |= RATE_MCS_ANT_B_MSK;
3235 		masks &= ~RATE_MCS_ANT_A_MSK;
3236 		link_quality.rate_n_flags[i] =
3237 		    iwk_rate_to_plcp(rate) | masks;
3238 	}
3239 
3240 	link_quality.general_params.single_stream_ant_msk = 2;
3241 	link_quality.general_params.dual_stream_ant_msk = 3;
3242 	link_quality.agg_params.agg_dis_start_th = 3;
3243 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3244 	link_quality.sta_id = IWK_AP_ID;
3245 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3246 	    sizeof (link_quality), 1);
3247 	if (err != IWK_SUCCESS) {
3248 		cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3249 		    "failed to config link quality table\n");
3250 		return (err);
3251 	}
3252 
3253 	return (IWK_SUCCESS);
3254 }
3255 
3256 /*
3257  * Send a scan request (assemble and submit a scan cmd) to the firmware.
3258  */
3259 static int
3260 iwk_scan(iwk_sc_t *sc)
3261 {
3262 	ieee80211com_t *ic = &sc->sc_ic;
3263 	iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3264 	iwk_tx_desc_t *desc;
3265 	iwk_tx_data_t *data;
3266 	iwk_cmd_t *cmd;
3267 	iwk_scan_hdr_t *hdr;
3268 	iwk_scan_chan_t *chan;
3269 	struct ieee80211_frame *wh;
3270 	ieee80211_node_t *in = ic->ic_bss;
3271 	uint8_t essid[IEEE80211_NWID_LEN+1];
3272 	struct ieee80211_rateset *rs;
3273 	enum ieee80211_phymode mode;
3274 	uint8_t *frm;
3275 	int i, pktlen, nrates;
3276 
3277 	data = &ring->data[ring->cur];
3278 	desc = data->desc;
3279 	cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3280 
3281 	cmd->hdr.type = REPLY_SCAN_CMD;
3282 	cmd->hdr.flags = 0;
3283 	cmd->hdr.qid = ring->qid;
3284 	cmd->hdr.idx = ring->cur | 0x40;
3285 
3286 	hdr = (iwk_scan_hdr_t *)cmd->data;
3287 	(void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3288 	hdr->nchan = 1;
3289 	hdr->quiet_time = LE_16(50);
3290 	hdr->quiet_plcp_th = LE_16(1);
3291 
3292 	hdr->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
3293 	hdr->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3294 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3295 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3296 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3297 
3298 	hdr->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3299 	hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3300 	hdr->tx_cmd.stop_time.life_time = 0xffffffff;
3301 	hdr->tx_cmd.tx_flags |= (0x200);
3302 	hdr->tx_cmd.rate.r.rate_n_flags = iwk_rate_to_plcp(2);
3303 	hdr->tx_cmd.rate.r.rate_n_flags |=
3304 	    (RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3305 	hdr->direct_scan[0].len = ic->ic_des_esslen;
3306 	hdr->direct_scan[0].id  = IEEE80211_ELEMID_SSID;
3307 
3308 	if (ic->ic_des_esslen) {
3309 		bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3310 		essid[ic->ic_des_esslen] = '\0';
3311 		IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3312 
3313 		bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3314 		    ic->ic_des_esslen);
3315 	} else {
3316 		bzero(hdr->direct_scan[0].ssid,
3317 		    sizeof (hdr->direct_scan[0].ssid));
3318 	}
3319 	/*
3320 	 * a probe request frame is required after the REPLY_SCAN_CMD
3321 	 */
3322 	wh = (struct ieee80211_frame *)(hdr + 1);
3323 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3324 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3325 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3326 	(void) memset(wh->i_addr1, 0xff, 6);
3327 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3328 	(void) memset(wh->i_addr3, 0xff, 6);
3329 	*(uint16_t *)&wh->i_dur[0] = 0;
3330 	*(uint16_t *)&wh->i_seq[0] = 0;
3331 
3332 	frm = (uint8_t *)(wh + 1);
3333 
3334 	/* essid IE */
3335 	if (in->in_esslen) {
3336 		bcopy(in->in_essid, essid, in->in_esslen);
3337 		essid[in->in_esslen] = '\0';
3338 		IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3339 		    essid));
3340 	}
3341 	*frm++ = IEEE80211_ELEMID_SSID;
3342 	*frm++ = in->in_esslen;
3343 	(void) memcpy(frm, in->in_essid, in->in_esslen);
3344 	frm += in->in_esslen;
3345 
3346 	mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3347 	rs = &ic->ic_sup_rates[mode];
3348 
3349 	/* supported rates IE */
3350 	*frm++ = IEEE80211_ELEMID_RATES;
3351 	nrates = rs->ir_nrates;
3352 	if (nrates > IEEE80211_RATE_SIZE)
3353 		nrates = IEEE80211_RATE_SIZE;
3354 	*frm++ = (uint8_t)nrates;
3355 	(void) memcpy(frm, rs->ir_rates, nrates);
3356 	frm += nrates;
3357 
3358 	/* supported xrates IE */
3359 	if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3360 		nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3361 		*frm++ = IEEE80211_ELEMID_XRATES;
3362 		*frm++ = (uint8_t)nrates;
3363 		(void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3364 		frm += nrates;
3365 	}
3366 
3367 	/* optional IE (usually for WPA) */
3368 	if (ic->ic_opt_ie != NULL) {
3369 		(void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3370 		frm += ic->ic_opt_ie_len;
3371 	}
3372 
3373 	/* setup length of probe request */
3374 	hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3375 	hdr->len = hdr->nchan * sizeof (iwk_scan_chan_t) +
3376 	    hdr->tx_cmd.len + sizeof (iwk_scan_hdr_t);
3377 
3378 	/*
3379 	 * the attributes of the scan channels follow the probe
3380 	 * request frame.
3381 	 */
3382 	chan = (iwk_scan_chan_t *)frm;
3383 	for (i = 1; i <= hdr->nchan; i++, chan++) {
3384 		if (ic->ic_des_esslen) {
3385 			chan->type = 3;
3386 		} else {
3387 			chan->type = 1;
3388 		}
3389 
3390 		chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3391 		chan->tpc.tx_gain = 0x3f;
3392 		chan->tpc.dsp_atten = 110;
3393 		chan->active_dwell = LE_16(50);
3394 		chan->passive_dwell = LE_16(120);
3395 
3396 		frm += sizeof (iwk_scan_chan_t);
3397 	}
3398 
3399 	pktlen = _PTRDIFF(frm, cmd);
3400 
3401 	(void) memset(desc, 0, sizeof (*desc));
3402 	desc->val0 = LE_32(1 << 24);
3403 	desc->pa[0].tb1_addr =
3404 	    (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3405 	desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3406 
3407 	/*
3408 	 * filling the byte count table may not be necessary for a cmd,
3409 	 * but we fill it here anyway.
3410 	 */
3411 	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3412 	    tfd_offset[ring->cur].val = 8;
3413 	if (ring->cur < IWK_MAX_WIN_SIZE) {
3414 		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3415 		    tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3416 	}
3417 
3418 	/* kick cmd ring */
3419 	ring->cur = (ring->cur + 1) % ring->count;
3420 	IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3421 
3422 	return (IWK_SUCCESS);
3423 }
3424 
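/*
 * Push the initial configuration to the firmware: power table,
 * bluetooth coexistence, RXON, tx power calibration, the broadcast
 * station entry and a default link quality table.
 */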
3425 static int
3426 iwk_config(iwk_sc_t *sc)
3427 {
3428 	ieee80211com_t *ic = &sc->sc_ic;
3429 	iwk_powertable_cmd_t powertable;
3430 	iwk_bt_cmd_t bt;
3431 	iwk_add_sta_t node;
3432 	iwk_link_quality_cmd_t link_quality;
3433 	int i, err;
3434 	uint16_t masks = 0;
3435 
3436 	/*
3437 	 * set power mode. Power management is disabled for now; enable it later
3438 	 */
3439 	(void) memset(&powertable, 0, sizeof (powertable));
3440 	powertable.flags = LE_16(0x8);
3441 	err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3442 	    sizeof (powertable), 0);
3443 	if (err != IWK_SUCCESS) {
3444 		cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3445 		return (err);
3446 	}
3447 
3448 	/* configure bt coexistence */
3449 	(void) memset(&bt, 0, sizeof (bt));
3450 	bt.flags = 3;
3451 	bt.lead_time = 0xaa;
3452 	bt.max_kill = 1;
3453 	err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3454 	    sizeof (bt), 0);
3455 	if (err != IWK_SUCCESS) {
3456 		cmn_err(CE_WARN,
3457 		    "iwk_config(): "
3458 		    "failed to configure bt coexistence\n");
3459 		return (err);
3460 	}
3461 
3462 	/* configure rxon */
3463 	(void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3464 	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3465 	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3466 	sc->sc_config.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3467 	sc->sc_config.flags = (RXON_FLG_TSF2HOST_MSK |
3468 	    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3469 	sc->sc_config.flags &= (~RXON_FLG_CCK_MSK);
3470 	switch (ic->ic_opmode) {
3471 	case IEEE80211_M_STA:
3472 		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3473 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3474 		    RXON_FILTER_DIS_DECRYPT_MSK |
3475 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3476 		break;
3477 	case IEEE80211_M_AHDEMO:
3478 		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3479 		sc->sc_config.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3480 		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3481 		    RXON_FILTER_DIS_DECRYPT_MSK |
3482 		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3483 		break;
3484 	case IEEE80211_M_HOSTAP:
3485 		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3486 		break;
3487 	case IEEE80211_M_MONITOR:
3488 		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3489 		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3490 		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3491 		break;
3492 	}
3493 	sc->sc_config.cck_basic_rates  = 0x0f;
3494 	sc->sc_config.ofdm_basic_rates = 0xff;
3495 
3496 	sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3497 	sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3498 
3499 	/* set antenna */
3500 
3501 	sc->sc_config.rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3502 	    LE_16((0x7 << RXON_RX_CHAIN_VALID_POS) |
3503 	    (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3504 	    (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3505 
3506 	err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3507 	    sizeof (iwk_rxon_cmd_t), 0);
3508 	if (err != IWK_SUCCESS) {
3509 		cmn_err(CE_WARN, "iwk_config(): "
3510 		    "failed to set configure command\n");
3511 		return (err);
3512 	}
3513 	/* obtain current temperature of chipset */
3514 	sc->sc_tempera = iwk_curr_tempera(sc);
3515 
3516 	/* perform Tx power calibration to determine the DSP and radio gains */
3517 	err = iwk_tx_power_calibration(sc);
3518 	if (err) {
3519 		cmn_err(CE_WARN, "iwk_config(): "
3520 		    "failed to set tx power table\n");
3521 		return (err);
3522 	}
3523 
3524 	/* add broadcast node so that we can send broadcast frame */
3525 	(void) memset(&node, 0, sizeof (node));
3526 	(void) memset(node.bssid, 0xff, 6);
3527 	node.id = IWK_BROADCAST_ID;
3528 	err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3529 	if (err != IWK_SUCCESS) {
3530 		cmn_err(CE_WARN, "iwk_config(): "
3531 		    "failed to add broadcast node\n");
3532 		return (err);
3533 	}
3534 
3535 	/* TX_LINK_QUALITY cmd ? */
3536 	(void) memset(&link_quality, 0, sizeof (link_quality));
3537 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3538 		masks |= RATE_MCS_CCK_MSK;
3539 		masks |= RATE_MCS_ANT_B_MSK;
3540 		masks &= ~RATE_MCS_ANT_A_MSK;
3541 		link_quality.rate_n_flags[i] = iwk_rate_to_plcp(2) | masks;
3542 	}
3543 
3544 	link_quality.general_params.single_stream_ant_msk = 2;
3545 	link_quality.general_params.dual_stream_ant_msk = 3;
3546 	link_quality.agg_params.agg_dis_start_th = 3;
3547 	link_quality.agg_params.agg_time_limit = LE_16(4000);
3548 	link_quality.sta_id = IWK_BROADCAST_ID;
3549 	err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3550 	    sizeof (link_quality), 0);
3551 	if (err != IWK_SUCCESS) {
3552 		cmn_err(CE_WARN, "iwk_config(): "
3553 		    "failed to config link quality table\n");
3554 		return (err);
3555 	}
3556 
3557 	return (IWK_SUCCESS);
3558 }
3559 
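/*
 * Ask the bus master DMA engine to stop and wait until it reports
 * itself disabled, unless the MAC is in a power-save state.
 */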
3560 static void
3561 iwk_stop_master(iwk_sc_t *sc)
3562 {
3563 	uint32_t tmp;
3564 	int n;
3565 
3566 	tmp = IWK_READ(sc, CSR_RESET);
3567 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3568 
3569 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3570 	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3571 	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3572 		return;
3573 
3574 	for (n = 0; n < 2000; n++) {
3575 		if (IWK_READ(sc, CSR_RESET) &
3576 		    CSR_RESET_REG_FLAG_MASTER_DISABLED)
3577 			break;
3578 		DELAY(1000);
3579 	}
3580 	if (n == 2000)
3581 		IWK_DBG((IWK_DEBUG_HW,
3582 		    "timeout waiting for master stop\n"));
3583 }
3584 
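/*
 * Select VMAIN as the adapter's power source via the APMG
 * power-control register.
 */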
3585 static int
3586 iwk_power_up(iwk_sc_t *sc)
3587 {
3588 	uint32_t tmp;
3589 
3590 	iwk_mac_access_enter(sc);
3591 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3592 	tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3593 	tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3594 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3595 	iwk_mac_access_exit(sc);
3596 
3597 	DELAY(5000);
3598 	return (IWK_SUCCESS);
3599 }
3600 
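/*
 * Low-level bring-up performed before loading the firmware: clear
 * pending interrupts, wait for the MAC clock, enable the DMA/BSM
 * clocks, apply PCI configuration workarounds and make sure every
 * hardware block is powered.
 */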
3601 static int
3602 iwk_preinit(iwk_sc_t *sc)
3603 {
3604 	uint32_t tmp;
3605 	int n;
3606 	uint8_t vlink;
3607 
3608 	/* clear any pending interrupts */
3609 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3610 
3611 	tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3612 	IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3613 	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3614 
3615 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3616 	IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3617 
3618 	/* wait for clock ready */
3619 	for (n = 0; n < 1000; n++) {
3620 		if (IWK_READ(sc, CSR_GP_CNTRL) &
3621 		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
3622 			break;
3623 		DELAY(10);
3624 	}
3625 	if (n == 1000) {
3626 		cmn_err(CE_WARN,
3627 		    "iwk_preinit(): timeout waiting for clock ready\n");
3628 		return (ETIMEDOUT);
3629 	}
3630 	iwk_mac_access_enter(sc);
3631 	tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
3632 	iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
3633 	    APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
3634 
3635 	DELAY(20);
3636 	tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
3637 	iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
3638 	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
3639 	iwk_mac_access_exit(sc);
3640 
3641 	IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
3642 
3643 	(void) iwk_power_up(sc);
3644 
3645 	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
3646 		tmp = ddi_get32(sc->sc_cfg_handle,
3647 		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
3648 		ddi_put32(sc->sc_cfg_handle,
3649 		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
3650 		    tmp & ~(1 << 11));
3651 	}
3652 
3653 
3654 	vlink = ddi_get8(sc->sc_cfg_handle,
3655 	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
3656 	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
3657 	    vlink & ~2);
3658 
3659 	tmp = IWK_READ(sc, CSR_SW_VER);
3660 	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
3661 	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
3662 	    CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
3663 	IWK_WRITE(sc, CSR_SW_VER, tmp);
3664 
3665 	/* make sure power is supplied to each part of the hardware */
3666 	iwk_mac_access_enter(sc);
3667 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3668 	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3669 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3670 	DELAY(5);
3671 	tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3672 	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
3673 	iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3674 	iwk_mac_access_exit(sc);
3675 	return (IWK_SUCCESS);
3676 }
3677 
3678 /*
3679  * set up semaphore flag to own the EEPROM
3680  */
3681 static int iwk_eep_sem_down(iwk_sc_t *sc)
3682 {
3683 	int count1, count2;
3684 	uint32_t tmp;
3685 
3686 	for (count1 = 0; count1 < 1000; count1++) {
3687 		tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3688 		IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3689 		    tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
3690 
3691 		for (count2 = 0; count2 < 2; count2++) {
3692 			if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
3693 			    CSR_HW_IF_CONFIG_REG_EEP_SEM)
3694 				return (IWK_SUCCESS);
3695 			DELAY(10000);
3696 		}
3697 	}
3698 	return (IWK_FAIL);
3699 }
3700 
3701 /*
3702  * reset semaphore flag to release the EEPROM
3703  */
3704 static void iwk_eep_sem_up(iwk_sc_t *sc)
3705 {
3706 	uint32_t tmp;
3707 
3708 	tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
3709 	IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
3710 	    tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
3711 }
3712 
3713 /*
3714  * This function loads all information from the EEPROM into the iwk_eep
3715  * structure embedded in the iwk_sc_t structure
3716  */
3717 static int iwk_eep_load(iwk_sc_t *sc)
3718 {
3719 	int i, rr;
3720 	uint32_t rv, tmp, eep_gp;
3721 	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
3722 	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
3723 
3724 	/* read eeprom gp register in CSR */
3725 	eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
3726 	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
3727 	    CSR_EEPROM_GP_BAD_SIGNATURE) {
3728 		cmn_err(CE_WARN, "EEPROM not found\n");
3729 		return (IWK_FAIL);
3730 	}
3731 
3732 	rr = iwk_eep_sem_down(sc);
3733 	if (rr != 0) {
3734 		cmn_err(CE_WARN, "failed to own EEPROM\n");
3735 		return (IWK_FAIL);
3736 	}
3737 
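	/*
	 * Each 16-bit EEPROM word below is read by writing (addr << 1) to
	 * CSR_EEPROM_REG, clearing bit 1 to start the access, polling bit 0
	 * until the read completes, and taking the data from the upper
	 * 16 bits of the register.
	 */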
3738 	for (addr = 0; addr < eep_sz; addr += 2) {
3739 		IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
3740 		tmp = IWK_READ(sc, CSR_EEPROM_REG);
3741 		IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
3742 
3743 		for (i = 0; i < 10; i++) {
3744 			rv = IWK_READ(sc, CSR_EEPROM_REG);
3745 			if (rv & 1)
3746 				break;
3747 			DELAY(10);
3748 		}
3749 
3750 		if (!(rv & 1)) {
3751 			cmn_err(CE_WARN, "timeout when reading the EEPROM\n");
3752 			iwk_eep_sem_up(sc);
3753 			return (IWK_FAIL);
3754 		}
3755 
3756 		eep_p[addr/2] = rv >> 16;
3757 	}
3758 
3759 	iwk_eep_sem_up(sc);
3760 	return (IWK_SUCCESS);
3761 }
3762 
3763 /*
3764  * init mac address in ieee80211com_t struct
3765  */
3766 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
3767 {
3768 	ieee80211com_t *ic = &sc->sc_ic;
3769 	struct iwk_eep *ep = &sc->sc_eep_map;
3770 
3771 	IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
3772 
3773 	IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
3774 	    ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
3775 	    ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
3776 }
3777 
3778 static int
3779 iwk_init(iwk_sc_t *sc)
3780 {
3781 	int qid, n, err;
3782 	clock_t clk;
3783 	uint32_t tmp;
3784 
3785 	mutex_enter(&sc->sc_glock);
3786 	sc->sc_flags &= ~IWK_F_FW_INIT;
3787 
3788 	(void) iwk_preinit(sc);
3789 
3790 	tmp = IWK_READ(sc, CSR_GP_CNTRL);
3791 	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
3792 		cmn_err(CE_WARN, "iwk_init(): Radio transmitter is off\n");
3793 		goto fail1;
3794 	}
3795 
3796 	/* init Rx ring */
3797 	iwk_mac_access_enter(sc);
3798 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
3799 
3800 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
3801 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
3802 	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
3803 
3804 	IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
3805 	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
3806 	    offsetof(struct iwk_shared, val0)) >> 4));
3807 
3808 	IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
3809 	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
3810 	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
3811 	    IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
3812 	    (RX_QUEUE_SIZE_LOG <<
3813 	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
3814 	iwk_mac_access_exit(sc);
3815 	IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
3816 	    (RX_QUEUE_SIZE - 1) & ~0x7);
3817 
3818 	/* init Tx rings */
3819 	iwk_mac_access_enter(sc);
3820 	iwk_reg_write(sc, SCD_TXFACT, 0);
3821 
3822 	/* keep warm page */
3823 	iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
3824 	    sc->sc_dma_kw.cookie.dmac_address >> 4);
3825 
3826 	for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
3827 		IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
3828 		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
3829 		IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
3830 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3831 		    IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
3832 	}
3833 	iwk_mac_access_exit(sc);
3834 
3835 	/* clear "radio off" and "disable command" bits */
3836 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3837 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
3838 	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3839 
3840 	/* clear any pending interrupts */
3841 	IWK_WRITE(sc, CSR_INT, 0xffffffff);
3842 
3843 	/* enable interrupts */
3844 	IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
3845 
3846 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3847 	IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3848 
3849 	/*
3850 	 * backup ucode data part for future use.
3851 	 */
3852 	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
3853 	    sc->sc_dma_fw_data.mem_va,
3854 	    sc->sc_dma_fw_data.alength);
3855 
3856 	for (n = 0; n < 2; n++) {
3857 		/* load firmware init segment into NIC */
3858 		err = iwk_load_firmware(sc);
3859 		if (err != IWK_SUCCESS) {
3860 			cmn_err(CE_WARN, "iwk_init(): "
3861 			    "failed to setup boot firmware\n");
3862 			continue;
3863 		}
3864 
3865 		/* now press "execute" to start the firmware running */
3866 		IWK_WRITE(sc, CSR_RESET, 0);
3867 		break;
3868 	}
3869 	if (n == 2) {
3870 		cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
3871 		goto fail1;
3872 	}
3873 	/* ...and wait at most two seconds for the adapter to initialize */
3874 	clk = ddi_get_lbolt() + drv_usectohz(2000000);
3875 	while (!(sc->sc_flags & IWK_F_FW_INIT)) {
3876 		if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
3877 			break;
3878 	}
3879 	if (!(sc->sc_flags & IWK_F_FW_INIT)) {
3880 		cmn_err(CE_WARN,
3881 		    "iwk_init(): timeout waiting for firmware init\n");
3882 		goto fail1;
3883 	}
3884 
3885 	/*
3886 	 * at this point the firmware is loaded OK; now configure the hardware
3887 	 * through the ucode API, including rxon, txpower, etc.
3888 	 */
3889 	err = iwk_config(sc);
3890 	if (err) {
3891 		cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
3892 		goto fail1;
3893 	}
3894 
3895 	/* at this point, hardware may receive beacons :) */
3896 	mutex_exit(&sc->sc_glock);
3897 	return (IWK_SUCCESS);
3898 
3899 fail1:
3900 	err = IWK_FAIL;
3901 	mutex_exit(&sc->sc_glock);
3902 	return (err);
3903 }
3904 
3905 static void
3906 iwk_stop(iwk_sc_t *sc)
3907 {
3908 	uint32_t tmp;
3909 	int i;
3910 
3911 	if (!(sc->sc_flags & IWK_F_QUIESCED))
3912 		mutex_enter(&sc->sc_glock);
3913 
3914 	IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3915 	/* disable interrupts */
3916 	IWK_WRITE(sc, CSR_INT_MASK, 0);
3917 	IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
3918 	IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
3919 
3920 	/* reset all Tx rings */
3921 	for (i = 0; i < IWK_NUM_QUEUES; i++)
3922 		iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
3923 
3924 	/* reset Rx ring */
3925 	iwk_reset_rx_ring(sc);
3926 
3927 	iwk_mac_access_enter(sc);
3928 	iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
3929 	iwk_mac_access_exit(sc);
3930 
3931 	DELAY(5);
3932 
3933 	iwk_stop_master(sc);
3934 
3935 	sc->sc_tx_timer = 0;
3936 	tmp = IWK_READ(sc, CSR_RESET);
3937 	IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
3938 
3939 	if (!(sc->sc_flags & IWK_F_QUIESCED))
3940 		mutex_exit(&sc->sc_glock);
3941 }
3942 
3943 /*
3944  * Naive implementation of the Adaptive Multi Rate Retry algorithm:
3945  * "IEEE 802.11 Rate Adaptation: A Practical Approach"
3946  * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
3947  * INRIA Sophia - Projet Planete
3948  * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
3949  */
3950 #define	is_success(amrr)	\
3951 	((amrr)->retrycnt < (amrr)->txcnt / 10)
3952 #define	is_failure(amrr)	\
3953 	((amrr)->retrycnt > (amrr)->txcnt / 3)
3954 #define	is_enough(amrr)		\
3955 	((amrr)->txcnt > 100)
3956 #define	is_min_rate(in)		\
3957 	((in)->in_txrate == 0)
3958 #define	is_max_rate(in)		\
3959 	((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
3960 #define	increase_rate(in)	\
3961 	((in)->in_txrate++)
3962 #define	decrease_rate(in)	\
3963 	((in)->in_txrate--)
3964 #define	reset_cnt(amrr)		\
3965 	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }
3966 
3967 #define	IWK_AMRR_MIN_SUCCESS_THRESHOLD	 1
3968 #define	IWK_AMRR_MAX_SUCCESS_THRESHOLD	15
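
/*
 * Worked example of the thresholds above (illustrative numbers only):
 * with txcnt = 120 and retrycnt = 6, is_success() holds (6 < 120/10) and
 * is_enough() holds (120 > 100), so the success counter is bumped and,
 * once it reaches success_threshold on a non-maximum rate, the rate index
 * is increased.  With retrycnt = 50 instead, is_failure() holds
 * (50 > 120/3) and the rate index is decreased.
 */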
3969 
3970 static void
3971 iwk_amrr_init(iwk_amrr_t *amrr)
3972 {
3973 	amrr->success = 0;
3974 	amrr->recovery = 0;
3975 	amrr->txcnt = amrr->retrycnt = 0;
3976 	amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
3977 }
3978 
3979 static void
3980 iwk_amrr_timeout(iwk_sc_t *sc)
3981 {
3982 	ieee80211com_t *ic = &sc->sc_ic;
3983 
3984 	IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
3985 	if (ic->ic_opmode == IEEE80211_M_STA)
3986 		iwk_amrr_ratectl(NULL, ic->ic_bss);
3987 	else
3988 		ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
3989 	sc->sc_clk = ddi_get_lbolt();
3990 }
3991 
3992 /* ARGSUSED */
3993 static void
3994 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
3995 {
3996 	iwk_amrr_t *amrr = (iwk_amrr_t *)in;
3997 	int need_change = 0;
3998 
3999 	if (is_success(amrr) && is_enough(amrr)) {
4000 		amrr->success++;
4001 		if (amrr->success >= amrr->success_threshold &&
4002 		    !is_max_rate(in)) {
4003 			amrr->recovery = 1;
4004 			amrr->success = 0;
4005 			increase_rate(in);
4006 			IWK_DBG((IWK_DEBUG_RATECTL,
4007 			    "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4008 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4009 			need_change = 1;
4010 		} else {
4011 			amrr->recovery = 0;
4012 		}
4013 	} else if (is_failure(amrr)) {
4014 		amrr->success = 0;
4015 		if (!is_min_rate(in)) {
4016 			if (amrr->recovery) {
4017 				amrr->success_threshold++;
4018 				if (amrr->success_threshold >
4019 				    IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4020 					amrr->success_threshold =
4021 					    IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4022 			} else {
4023 				amrr->success_threshold =
4024 				    IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4025 			}
4026 			decrease_rate(in);
4027 			IWK_DBG((IWK_DEBUG_RATECTL,
4028 			    "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4029 			    in->in_txrate, amrr->txcnt, amrr->retrycnt));
4030 			need_change = 1;
4031 		}
4032 		amrr->recovery = 0;	/* paper is incorrect */
4033 	}
4034 
4035 	if (is_enough(amrr) || need_change)
4036 		reset_cnt(amrr);
4037 }
4038 
4039 /*
4040  * calculate the 4965 chipset's Kelvin temperature from the data in the
4041  * init alive and statistics notifications.
4042  * The details are described in the iwk_calibration.h file
4043  */
4044 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4045 {
4046 	int32_t  tempera;
4047 	int32_t  r1, r2, r3;
4048 	uint32_t  r4_u;
4049 	int32_t   r4_s;
4050 
4051 	if (iwk_is_fat_channel(sc)) {
4052 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[1]);
4053 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[1]);
4054 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[1]);
4055 		r4_u = sc->sc_card_alive_init.therm_r4[1];
4056 	} else {
4057 		r1 = (int32_t)(sc->sc_card_alive_init.therm_r1[0]);
4058 		r2 = (int32_t)(sc->sc_card_alive_init.therm_r2[0]);
4059 		r3 = (int32_t)(sc->sc_card_alive_init.therm_r3[0]);
4060 		r4_u = sc->sc_card_alive_init.therm_r4[0];
4061 	}
4062 
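	/*
	 * The temperature statistic is a 24-bit signed quantity stored in a
	 * 32-bit word; shifting left by (31-23) bits and arithmetically back
	 * right sign-extends it to a full int32_t.
	 */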
4063 	if (sc->sc_flags & IWK_F_STATISTICS) {
4064 		r4_s = (int32_t)(sc->sc_statistics.general.temperature <<
4065 		    (31-23)) >> (31-23);
4066 	} else {
4067 		r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4068 	}
4069 
4070 	IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4071 	    r1, r2, r3, r4_s));
4072 
4073 	if (r3 == r1) {
4074 		cmn_err(CE_WARN, "iwk_curr_tempera(): "
4075 		    "failed to calculate temperature"
4076 		    "because r3 = r1\n");
4077 		return (DDI_FAILURE);
4078 	}
4079 
4080 	tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4081 	tempera /= (r3 - r1);
4082 	tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4083 
4084 	IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4085 	    tempera, KELVIN_TO_CELSIUS(tempera)));
4086 
4087 	return (tempera);
4088 }
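
/*
 * Restating the computation above as a formula (a sketch using the symbolic
 * names only; the actual constant values live in iwk_calibration.h):
 *
 *	T(Kelvin) = (TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1))
 *	    * 97 / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET
 *
 * where R1..R3 come from the init alive notification and R4 is the
 * sign-extended temperature reading.
 */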
4089 
4090 /* Determine whether 4965 is using 2.4 GHz band */
4091 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4092 {
4093 	return (sc->sc_config.flags & RXON_FLG_BAND_24G_MSK);
4094 }
4095 
4096 /* Determine whether 4965 is using fat channel */
4097 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4098 {
4099 	return ((sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4100 	    (sc->sc_config.flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4101 }
4102 
4103 /*
4104  * In MIMO mode, determine which group the 4965's current channel belongs to.
4105  * For more information about "channel group",
4106  * please refer to the iwk_calibration.h file
4107  */
4108 static int iwk_txpower_grp(uint16_t channel)
4109 {
4110 	if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4111 	    channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4112 		return (CALIB_CH_GROUP_5);
4113 	}
4114 
4115 	if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4116 	    channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4117 		return (CALIB_CH_GROUP_1);
4118 	}
4119 
4120 	if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4121 	    channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4122 		return (CALIB_CH_GROUP_2);
4123 	}
4124 
4125 	if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4126 	    channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4127 		return (CALIB_CH_GROUP_3);
4128 	}
4129 
4130 	if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4131 	    channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4132 		return (CALIB_CH_GROUP_4);
4133 	}
4134 
4135 	cmn_err(CE_WARN, "iwk_txpower_grp(): "
4136 	    "can't find txpower group for channel %d.\n", channel);
4137 
4138 	return (DDI_FAILURE);
4139 }
4140 
4141 /* 2.4 GHz */
4142 static uint16_t iwk_eep_band_1[14] = {
4143 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4144 };
4145 
4146 /* 5.2 GHz bands */
4147 static uint16_t iwk_eep_band_2[13] = {
4148 	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4149 };
4150 
4151 static uint16_t iwk_eep_band_3[12] = {
4152 	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4153 };
4154 
4155 static uint16_t iwk_eep_band_4[11] = {
4156 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4157 };
4158 
4159 static uint16_t iwk_eep_band_5[6] = {
4160 	145, 149, 153, 157, 161, 165
4161 };
4162 
4163 static uint16_t iwk_eep_band_6[7] = {
4164 	1, 2, 3, 4, 5, 6, 7
4165 };
4166 
4167 static uint16_t iwk_eep_band_7[11] = {
4168 	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4169 };
4170 
4171 /* Get regulatory data from eeprom for a given channel */
4172 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4173     uint16_t channel,
4174     int is_24G, int is_fat, int is_hi_chan)
4175 {
4176 	int32_t i;
4177 	uint16_t chan;
4178 
4179 	if (is_fat) {  /* 11n mode */
4180 
4181 		if (is_hi_chan) {
4182 			chan = channel - 4;
4183 		} else {
4184 			chan = channel;
4185 		}
4186 
4187 		for (i = 0; i < 7; i++) {
4188 			if (iwk_eep_band_6[i] == chan) {
4189 				return (&sc->sc_eep_map.band_24_channels[i]);
4190 			}
4191 		}
4192 		for (i = 0; i < 11; i++) {
4193 			if (iwk_eep_band_7[i] == chan) {
4194 				return (&sc->sc_eep_map.band_52_channels[i]);
4195 			}
4196 		}
4197 	} else if (is_24G) {  /* 2.4 GHz band */
4198 		for (i = 0; i < 14; i++) {
4199 			if (iwk_eep_band_1[i] == channel) {
4200 				return (&sc->sc_eep_map.band_1_channels[i]);
4201 			}
4202 		}
4203 	} else {  /* 5 GHz band */
4204 		for (i = 0; i < 13; i++) {
4205 			if (iwk_eep_band_2[i] == channel) {
4206 				return (&sc->sc_eep_map.band_2_channels[i]);
4207 			}
4208 		}
4209 		for (i = 0; i < 12; i++) {
4210 			if (iwk_eep_band_3[i] == channel) {
4211 				return (&sc->sc_eep_map.band_3_channels[i]);
4212 			}
4213 		}
4214 		for (i = 0; i < 11; i++) {
4215 			if (iwk_eep_band_4[i] == channel) {
4216 				return (&sc->sc_eep_map.band_4_channels[i]);
4217 			}
4218 		}
4219 		for (i = 0; i < 6; i++) {
4220 			if (iwk_eep_band_5[i] == channel) {
4221 				return (&sc->sc_eep_map.band_5_channels[i]);
4222 			}
4223 		}
4224 	}
4225 
4226 	return (NULL);
4227 }
4228 
4229 /*
4230  * Determine which subband a given channel belongs to
4231  * in the 2.4 GHz or 5 GHz band
4232  */
4233 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4234 {
4235 	int32_t b_n = -1;
4236 
4237 	for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4238 		if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4239 			continue;
4240 		}
4241 
4242 		if ((channel >=
4243 		    (uint16_t)sc->sc_eep_map.calib_info.
4244 		    band_info_tbl[b_n].ch_from) &&
4245 		    (channel <=
4246 		    (uint16_t)sc->sc_eep_map.calib_info.
4247 		    band_info_tbl[b_n].ch_to)) {
4248 			break;
4249 		}
4250 	}
4251 
4252 	return (b_n);
4253 }
4254 
4255 /* Signed division with rounding, used by the interpolation operation */
4256 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4257 {
4258 	int32_t sign = 1;
4259 
4260 	if (num < 0) {
4261 		sign = -sign;
4262 		num = -num;
4263 	}
4264 
4265 	if (denom < 0) {
4266 		sign = -sign;
4267 		denom = -denom;
4268 	}
4269 
4270 	*res = ((num*2 + denom) / (denom*2)) * sign;
4271 
4272 	return (IWK_SUCCESS);
4273 }
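
/*
 * Example of the rounding behaviour above (illustrative values only):
 * iwk_division(7, 2, &res) computes ((14 + 2) / 4) = 4, i.e. 3.5 rounded
 * to the nearest integer, and iwk_division(-5, 2, &res) yields -3.
 */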
4274 
4275 /* Perform linear interpolation between two calibration points */
4276 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4277     int32_t x2, int32_t y2)
4278 {
4279 	int32_t val;
4280 
4281 	if (x2 == x1) {
4282 		return (y1);
4283 	} else {
4284 		(void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4285 		return (val + y2);
4286 	}
4287 }
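
/*
 * The function above is plain linear interpolation with rounded integer
 * division:  y(x) = y2 + (x2 - x) * (y1 - y2) / (x2 - x1).  For example
 * (illustrative values), iwk_interpolate_value(5, 1, 10, 9, 2) returns 6,
 * the midpoint between the two calibration points.
 */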
4288 
4289 /* Get interpolated measurement data of a given channel for all chains. */
4290 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4291     struct iwk_eep_calib_channel_info *chan_info)
4292 {
4293 	int32_t ban_n;
4294 	uint32_t ch1_n, ch2_n;
4295 	int32_t c, m;
4296 	struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4297 
4298 	/* determine subband number */
4299 	ban_n = iwk_band_number(sc, channel);
4300 	if (ban_n >= EEP_TX_POWER_BANDS) {
4301 		return (DDI_FAILURE);
4302 	}
4303 
4304 	ch1_n =
4305 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4306 	ch2_n =
4307 	    (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4308 
4309 	chan_info->ch_num = (uint8_t)channel;  /* given channel number */
4310 
4311 	/*
4312 	 * go through all chains on chipset
4313 	 */
4314 	for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4315 		/*
4316 		 * go through all factory measurements
4317 		 */
4318 		for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4319 			m1_p =
4320 			    &(sc->sc_eep_map.calib_info.
4321 			    band_info_tbl[ban_n].ch1.measure[c][m]);
4322 			m2_p =
4323 			    &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4324 			    ch2.measure[c][m]);
4325 			m_p = &(chan_info->measure[c][m]);
4326 
4327 			/*
4328 			 * make interpolation to get actual
4329 			 * Tx power for given channel
4330 			 */
4331 			m_p->actual_pow = iwk_interpolate_value(channel,
4332 			    ch1_n, m1_p->actual_pow,
4333 			    ch2_n, m2_p->actual_pow);
4334 
4335 			/* make interpolation to get index into gain table */
4336 			m_p->gain_idx = iwk_interpolate_value(channel,
4337 			    ch1_n, m1_p->gain_idx,
4338 			    ch2_n, m2_p->gain_idx);
4339 
4340 			/* make interpolation to get chipset temperature */
4341 			m_p->temperature = iwk_interpolate_value(channel,
4342 			    ch1_n, m1_p->temperature,
4343 			    ch2_n, m2_p->temperature);
4344 
4345 			/*
4346 			 * make interpolation to get power
4347 			 * amp detector level
4348 			 */
4349 			m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4350 			    m1_p->pa_det,
4351 			    ch2_n, m2_p->pa_det);
4352 		}
4353 	}
4354 
4355 	return (IWK_SUCCESS);
4356 }
4357 
4358 /*
4359  * Calculate voltage compensation for Tx power. For more information,
4360  * please refer to the iwk_calibration.h file
4361  */
4362 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4363     int32_t curr_voltage)
4364 {
4365 	int32_t vol_comp = 0;
4366 
4367 	if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4368 	    (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4369 		return (vol_comp);
4370 	}
4371 
4372 	(void) iwk_division(curr_voltage-eep_voltage,
4373 	    TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4374 
4375 	if (curr_voltage > eep_voltage) {
4376 		vol_comp *= 2;
4377 	}
4378 	if ((vol_comp < -2) || (vol_comp > 2)) {
4379 		vol_comp = 0;
4380 	}
4381 
4382 	return (vol_comp);
4383 }
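
/*
 * Example of the compensation above (symbolic, since the constant values
 * are defined in iwk_calibration.h): if curr_voltage exceeds eep_voltage by
 * exactly TX_POWER_IWK_VOLTAGE_CODES_PER_03V, the division yields 1, which
 * is doubled to 2 because curr_voltage > eep_voltage; any result outside
 * the range [-2, 2] is discarded and replaced with 0.
 */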
4384 
4385 /*
4386  * Thermal compensation values for txpower for various frequency ranges ...
4387  * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4388  */
4389 static struct iwk_txpower_tempera_comp {
4390 	int32_t degrees_per_05db_a;
4391 	int32_t degrees_per_05db_a_denom;
4392 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4393 	{9, 2},			/* group 0 5.2, ch  34-43 */
4394 	{4, 1},			/* group 1 5.2, ch  44-70 */
4395 	{4, 1},			/* group 2 5.2, ch  71-124 */
4396 	{4, 1},			/* group 3 5.2, ch 125-200 */
4397 	{3, 1}			/* group 4 2.4, ch   all */
4398 };
4399 
4400 /*
4401  * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4402  * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4403  */
4404 static int32_t back_off_table[] = {
4405 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4406 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4407 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4408 	10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4409 	10			/* CCK */
4410 };
4411 
4412 /* determine minimum Tx power index in gain table */
4413 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4414 {
4415 	if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4416 		return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4417 	}
4418 
4419 	return (MIN_TX_GAIN_INDEX);
4420 }
4421 
4422 /*
4423  * Determine DSP and radio gain according to temperature and other factors.
4424  * This function does the bulk of the Tx power calibration work
4425  */
4426 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4427     struct iwk_tx_power_db *tp_db)
4428 {
4429 	int is_24G, is_fat, is_high_chan = 0, is_mimo;
4430 	int c, r;
4431 	int32_t target_power;
4432 	int32_t tx_grp = CALIB_CH_GROUP_MAX;
4433 	uint16_t channel;
4434 	uint8_t saturation_power;
4435 	int32_t regu_power;
4436 	int32_t curr_regu_power;
4437 	struct iwk_eep_channel *eep_chan_p;
4438 	struct iwk_eep_calib_channel_info eep_chan_calib;
4439 	int32_t eep_voltage, init_voltage;
4440 	int32_t voltage_compensation;
4441 	int32_t temperature;
4442 	int32_t degrees_per_05db_num;
4443 	int32_t degrees_per_05db_denom;
4444 	struct iwk_eep_calib_measure *measure_p;
4445 	int32_t interpo_temp;
4446 	int32_t power_limit;
4447 	int32_t atten_value;
4448 	int32_t tempera_comp[2];
4449 	int32_t interpo_gain_idx[2];
4450 	int32_t interpo_actual_pow[2];
4451 	union iwk_tx_power_dual_stream txpower_gains;
4452 	int32_t txpower_gains_idx;
4453 
4454 	channel = sc->sc_config.chan;
4455 
4456 	/* 2.4 GHz or 5 GHz band */
4457 	is_24G = iwk_is_24G_band(sc);
4458 
4459 	/* fat channel or not */
4460 	is_fat = iwk_is_fat_channel(sc);
4461 
4462 	/*
4463 	 * a fat channel is identified by either its low half or its high
4464 	 * half channel number
4465 	 */
4466 	if (is_fat && (sc->sc_config.flags &
4467 	    RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4468 		is_high_chan = 1;
4469 	}
4470 
4471 	if ((channel > 0) && (channel < 200)) {
4472 		/* get regulatory channel data from eeprom */
4473 		eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4474 		    is_fat, is_high_chan);
4475 		if (NULL == eep_chan_p) {
4476 			cmn_err(CE_WARN,
4477 			    "iwk_txpower_table_cmd_init(): "
4478 			    "can't get channel infomation\n");
4479 			return (DDI_FAILURE);
4480 		}
4481 	} else {
4482 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4483 		    "channel(%d) isn't in proper range\n",
4484 		    channel);
4485 		return (DDI_FAILURE);
4486 	}
4487 
4488 	/* initial value of Tx power */
4489 	sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4490 	if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4491 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4492 		    "user TX power is too weak\n");
4493 		return (DDI_FAILURE);
4494 	} else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4495 		cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4496 		    "user TX power is too strong\n");
4497 		return (DDI_FAILURE);
4498 	}
4499 
4500 	target_power = 2 * sc->sc_user_txpower;
4501 
4502 	/* determine which group current channel belongs to */
4503 	tx_grp = iwk_txpower_grp(channel);
4504 	if (tx_grp < 0) {
4505 		return (tx_grp);
4506 	}
4507 
4508 
4509 	if (is_fat) {
4510 		if (is_high_chan) {
4511 			channel -= 2;
4512 		} else {
4513 			channel += 2;
4514 		}
4515 	}
4516 
4517 	/* determine saturation power */
4518 	if (is_24G) {
4519 		saturation_power =
4520 		    sc->sc_eep_map.calib_info.saturation_power24;
4521 	} else {
4522 		saturation_power =
4523 		    sc->sc_eep_map.calib_info.saturation_power52;
4524 	}
4525 
4526 	if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4527 	    saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4528 		if (is_24G) {
4529 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4530 		} else {
4531 			saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4532 		}
4533 	}
4534 
4535 	/* determine regulatory power */
4536 	regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4537 	if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4538 	    (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4539 		if (is_24G) {
4540 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4541 		} else {
4542 			regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4543 		}
4544 	}
4545 
4546 	/*
4547 	 * get measurement data for the current channel, such as temperature,
4548 	 * index into the gain table, and actual Tx power
4549 	 */
4550 	(void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4551 
4552 	eep_voltage = (int32_t)sc->sc_eep_map.calib_info.voltage;
4553 	init_voltage = (int32_t)sc->sc_card_alive_init.voltage;
4554 
4555 	/* calculate voltage compensation to Tx power */
4556 	voltage_compensation =
4557 	    iwk_voltage_compensation(eep_voltage, init_voltage);
4558 
4559 	/* clamp the chipset temperature into the supported range */
4560 	if (sc->sc_tempera < IWK_TX_POWER_TEMPERATURE_MIN) {
4561 		temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4562 	} else if (sc->sc_tempera > IWK_TX_POWER_TEMPERATURE_MAX) {
4563 		temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4564 	} else {
4565 		temperature = sc->sc_tempera;
4566 	}
4569 	temperature = KELVIN_TO_CELSIUS(temperature);
4570 
4571 	degrees_per_05db_num =
4572 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4573 	degrees_per_05db_denom =
4574 	    txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4575 
4576 	for (c = 0; c < 2; c++) {  /* go through all chains */
4577 		measure_p = &eep_chan_calib.measure[c][1];
4578 		interpo_temp = measure_p->temperature;
4579 
4580 		/* determine temperature compensation to Tx power */
4581 		(void) iwk_division(
4582 		    (temperature-interpo_temp)*degrees_per_05db_denom,
4583 		    degrees_per_05db_num, &tempera_comp[c]);
4584 
4585 		interpo_gain_idx[c] = measure_p->gain_idx;
4586 		interpo_actual_pow[c] = measure_p->actual_pow;
4587 	}
4588 
4589 	/*
4590 	 * go through all rate entries in Tx power table
4591 	 */
4592 	for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4593 		if (r & 0x8) {
4594 			/* need to lower regulatory power for MIMO mode */
4595 			curr_regu_power = regu_power -
4596 			    IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4597 			is_mimo = 1;
4598 		} else {
4599 			curr_regu_power = regu_power;
4600 			is_mimo = 0;
4601 		}
4602 
4603 		power_limit = saturation_power - back_off_table[r];
4604 		if (power_limit > curr_regu_power) {
4605 			/* final Tx power limit */
4606 			power_limit = curr_regu_power;
4607 		}
4608 
4609 		if (target_power > power_limit) {
4610 			target_power = power_limit; /* final target Tx power */
4611 		}
4612 
4613 		for (c = 0; c < 2; c++) {	  /* go through all Tx chains */
4614 			if (is_mimo) {
4615 				atten_value =
4616 				    sc->sc_card_alive_init.tx_atten[tx_grp][c];
4617 			} else {
4618 				atten_value = 0;
4619 			}
4620 
4621 			/*
4622 			 * calculate the index into the gain table;
4623 			 * this is the key step of the calculation
4624 			 */
4625 			txpower_gains_idx = interpo_gain_idx[c] -
4626 			    (target_power - interpo_actual_pow[c]) -
4627 			    tempera_comp[c] - voltage_compensation +
4628 			    atten_value;
4629 
4630 			if (txpower_gains_idx <
4631 			    iwk_min_power_index(r, is_24G)) {
4632 				txpower_gains_idx =
4633 				    iwk_min_power_index(r, is_24G);
4634 			}
4635 
4636 			if (!is_24G) {
4637 				/*
4638 				 * support negative index for 5 GHz
4639 				 * band
4640 				 */
4641 				txpower_gains_idx += 9;
4642 			}
4643 
4644 			if (POWER_TABLE_CCK_ENTRY == r) {
4645 				/* for CCK mode, make necessary attenuation */
4646 				txpower_gains_idx +=
4647 				    IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
4648 			}
4649 
4650 			if (txpower_gains_idx > 107) {
4651 				txpower_gains_idx = 107;
4652 			} else if (txpower_gains_idx < 0) {
4653 				txpower_gains_idx = 0;
4654 			}
4655 
4656 			/* search DSP and radio gains in gain table */
4657 			txpower_gains.s.radio_tx_gain[c] =
4658 			    gains_table[is_24G][txpower_gains_idx].radio;
4659 			txpower_gains.s.dsp_predis_atten[c] =
4660 			    gains_table[is_24G][txpower_gains_idx].dsp;
4661 
4662 			IWK_DBG((IWK_DEBUG_CALIBRATION,
4663 			    "rate_index: %d, "
4664 			    "gain_index %d, c: %d,is_mimo: %d\n",
4665 			    r, txpower_gains_idx, c, is_mimo));
4666 		}
4667 
4668 		/* initialize Tx power table */
4669 		if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
4670 			tp_db->ht_ofdm_power[r].dw = txpower_gains.dw;
4671 		} else {
4672 			tp_db->legacy_cck_power.dw = txpower_gains.dw;
4673 		}
4674 	}
4675 
4676 	return (IWK_SUCCESS);
4677 }
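
/*
 * Summary of the per-chain gain index computed in the loop above:
 *
 *	gain_idx = interpolated_gain_idx
 *	    - (target_power - interpolated_actual_pow)
 *	    - temperature_compensation - voltage_compensation
 *	    + attenuation (MIMO rates only)
 *
 * then raised to the minimum index allowed for the rate and band, offset
 * by 9 for the 5 GHz band, bumped by IWK_TX_POWER_CCK_COMPENSATION_C_STEP
 * for the CCK entry, and finally clamped to the table range [0, 107].
 */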
4678 
4679 /*
4680  * perform Tx power calibration to adjust Tx power.
4681  * This is done by sending out the Tx power table command.
4682  */
4683 static int iwk_tx_power_calibration(iwk_sc_t *sc)
4684 {
4685 	iwk_tx_power_table_cmd_t cmd;
4686 	int rv;
4687 
4688 	if (sc->sc_flags & IWK_F_SCANNING) {
4689 		return (IWK_SUCCESS);
4690 	}
4691 
4692 	/* necessary initialization to Tx power table command */
4693 	cmd.band = (uint8_t)iwk_is_24G_band(sc);
4694 	cmd.channel = sc->sc_config.chan;
4695 	cmd.channel_normal_width = 0;
4696 
4697 	/* initialize Tx power table */
4698 	rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
4699 	if (rv) {
4700 		cmn_err(CE_NOTE, "iwk_tx_power_calibration(): rv = %d\n", rv);
4701 		return (rv);
4702 	}
4703 
4704 	/* send out Tx power table command */
4705 	rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
4706 	if (rv) {
4707 		return (rv);
4708 	}
4709 
4710 	/* record current temperature */
4711 	sc->sc_last_tempera = sc->sc_tempera;
4712 
4713 	return (IWK_SUCCESS);
4714 }
4715 
4716 /* Handler for the statistics notification from the uCode */
4717 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
4718 {
4719 	int is_diff;
4720 	struct iwk_notif_statistics *statistics_p =
4721 	    (struct iwk_notif_statistics *)(desc + 1);
4722 
4723 	mutex_enter(&sc->sc_glock);
4724 
4725 	is_diff = (sc->sc_statistics.general.temperature !=
4726 	    statistics_p->general.temperature) ||
4727 	    ((sc->sc_statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
4728 	    (statistics_p->flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK));
4729 
4730 	/* update statistics data */
4731 	(void) memcpy(&sc->sc_statistics, statistics_p,
4732 	    sizeof (struct iwk_notif_statistics));
4733 
4734 	sc->sc_flags |= IWK_F_STATISTICS;
4735 
4736 	if (!(sc->sc_flags & IWK_F_SCANNING)) {
4737 		/* make Receiver gain balance calibration */
4738 		(void) iwk_rxgain_diff(sc);
4739 
4740 		/* make Receiver sensitivity calibration */
4741 		(void) iwk_rx_sens(sc);
4742 	}
4743 
4744 
4745 	if (!is_diff) {
4746 		mutex_exit(&sc->sc_glock);
4747 		return;
4748 	}
4749 
4750 	/* calculate the current temperature of the 4965 chipset */
4751 	sc->sc_tempera = iwk_curr_tempera(sc);
4752 
4753 	/* a distinct temperature change triggers Tx power calibration */
4754 	if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
4755 	    ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
4756 		/* make Tx power calibration */
4757 		(void) iwk_tx_power_calibration(sc);
4758 	}
4759 
4760 	mutex_exit(&sc->sc_glock);
4761 }
4762 
4763 /* Determine whether this station is in the associated state */
4764 static int iwk_is_associated(iwk_sc_t *sc)
4765 {
4766 	return (sc->sc_config.filter_flags & RXON_FILTER_ASSOC_MSK);
4767 }
4768 
4769 /* Make necessary preparation for Receiver gain balance calibration */
4770 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
4771 {
4772 	int i, rv;
4773 	struct iwk_calibration_cmd cmd;
4774 	struct iwk_rx_gain_diff *gain_diff_p;
4775 
4776 	gain_diff_p = &sc->sc_rxgain_diff;
4777 
4778 	(void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
4779 	(void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
4780 
4781 	for (i = 0; i < RX_CHAINS_NUM; i++) {
4782 		gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
4783 	}
4784 
4785 	if (iwk_is_associated(sc)) {
4786 		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4787 		cmd.diff_gain_a = 0;
4788 		cmd.diff_gain_b = 0;
4789 		cmd.diff_gain_c = 0;
4790 
4791 		/* assume the gains of all Rx chains are balanced */
4792 		rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
4793 		    sizeof (cmd), 1);
4794 		if (rv) {
4795 			return (rv);
4796 		}
4797 
4798 		gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
4799 	}
4800 
4801 	return (IWK_SUCCESS);
4802 }
4803 
4804 /*
4805  * perform receiver gain balance calibration to balance Rx gain among
4806  * the Rx chains and to determine which chains are disconnected
4807  */
4808 static int iwk_rxgain_diff(iwk_sc_t *sc)
4809 {
4810 	int i, is_24G, rv;
4811 	int max_beacon_chain_n;
4812 	int min_noise_chain_n;
4813 	uint16_t channel_n;
4814 	int32_t beacon_diff;
4815 	int32_t noise_diff;
4816 	uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
4817 	uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
4818 	struct iwk_calibration_cmd cmd;
4819 	uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4820 	uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
4821 	struct statistics_rx_non_phy *rx_general_p =
4822 	    &sc->sc_statistics.rx.general;
4823 	struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
4824 
4825 	if (INTERFERENCE_DATA_AVAILABLE !=
4826 	    rx_general_p->interference_data_flag) {
4827 		return (IWK_SUCCESS);
4828 	}
4829 
4830 	if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
4831 		return (IWK_SUCCESS);
4832 	}
4833 
4834 	is_24G = iwk_is_24G_band(sc);
4835 	channel_n = sc->sc_config.chan;	 /* channel number */
4836 
4837 	if ((channel_n != (sc->sc_statistics.flag >> 16)) ||
4838 	    ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
4839 	    (sc->sc_statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
4840 	    !is_24G)) {
4841 		return (IWK_SUCCESS);
4842 	}
4843 
4844 	/* Rx chain's noise strength from statistics notification */
4845 	noise_chain_a = rx_general_p->beacon_silence_rssi_a & 0xFF;
4846 	noise_chain_b = rx_general_p->beacon_silence_rssi_b & 0xFF;
4847 	noise_chain_c = rx_general_p->beacon_silence_rssi_c & 0xFF;
4848 
4849 	/* Rx chain's beacon strength from statistics notification */
4850 	beacon_chain_a = rx_general_p->beacon_rssi_a & 0xFF;
4851 	beacon_chain_b = rx_general_p->beacon_rssi_b & 0xFF;
4852 	beacon_chain_c = rx_general_p->beacon_rssi_c & 0xFF;
4853 
4854 	gain_diff_p->beacon_count++;
4855 
4856 	/* accumulate chain's noise strength */
4857 	gain_diff_p->noise_stren_a += noise_chain_a;
4858 	gain_diff_p->noise_stren_b += noise_chain_b;
4859 	gain_diff_p->noise_stren_c += noise_chain_c;
4860 
4861 	/* accumulate chain's beacon strength */
4862 	gain_diff_p->beacon_stren_a += beacon_chain_a;
4863 	gain_diff_p->beacon_stren_b += beacon_chain_b;
4864 	gain_diff_p->beacon_stren_c += beacon_chain_c;
4865 
4866 	if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
4867 		/* calculate average beacon strength */
4868 		beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
4869 		beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
4870 		beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
4871 
4872 		/* calculate average noise strength */
4873 		noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
4874 		noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
4875 		noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
4876 
4877 		/* determine maximum beacon strength among 3 chains */
4878 		if ((beacon_aver[0] >= beacon_aver[1]) &&
4879 		    (beacon_aver[0] >= beacon_aver[2])) {
4880 			max_beacon_chain_n = 0;
4881 			gain_diff_p->connected_chains = 1 << 0;
4882 		} else if (beacon_aver[1] >= beacon_aver[2]) {
4883 			max_beacon_chain_n = 1;
4884 			gain_diff_p->connected_chains = 1 << 1;
4885 		} else {
4886 			max_beacon_chain_n = 2;
4887 			gain_diff_p->connected_chains = 1 << 2;
4888 		}
4889 
4890 		/* determine which chain is disconnected */
4891 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4892 			if (i != max_beacon_chain_n) {
4893 				beacon_diff = beacon_aver[max_beacon_chain_n] -
4894 				    beacon_aver[i];
4895 				if (beacon_diff > MAX_ALLOWED_DIFF) {
4896 					gain_diff_p->disconnect_chain[i] = 1;
4897 				} else {
4898 					gain_diff_p->connected_chains |=
4899 					    (1 << i);
4900 				}
4901 			}
4902 		}
4903 
4904 		/*
4905 		 * if chains A and B are both disconnected,
4906 		 * assume the one with the stronger beacon is connected
4907 		 */
4908 		if (gain_diff_p->disconnect_chain[0] &&
4909 		    gain_diff_p->disconnect_chain[1]) {
4910 			if (beacon_aver[0] >= beacon_aver[1]) {
4911 				gain_diff_p->disconnect_chain[0] = 0;
4912 				gain_diff_p->connected_chains |= (1 << 0);
4913 			} else {
4914 				gain_diff_p->disconnect_chain[1] = 0;
4915 				gain_diff_p->connected_chains |= (1 << 1);
4916 			}
4917 		}
4918 
4919 		/* determine minimum noise strength among 3 chains */
4920 		if (!gain_diff_p->disconnect_chain[0]) {
4921 			min_noise_chain_n = 0;
4922 
4923 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4924 				if (!gain_diff_p->disconnect_chain[i] &&
4925 				    (noise_aver[i] <=
4926 				    noise_aver[min_noise_chain_n])) {
4927 					min_noise_chain_n = i;
4928 				}
4929 
4930 			}
4931 		} else {
4932 			min_noise_chain_n = 1;
4933 
4934 			for (i = 0; i < RX_CHAINS_NUM; i++) {
4935 				if (!gain_diff_p->disconnect_chain[i] &&
4936 				    (noise_aver[i] <=
4937 				    noise_aver[min_noise_chain_n])) {
4938 					min_noise_chain_n = i;
4939 				}
4940 			}
4941 		}
4942 
4943 		gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
4944 
4945 		/* determine gain difference between chains */
4946 		for (i = 0; i < RX_CHAINS_NUM; i++) {
4947 			if (!gain_diff_p->disconnect_chain[i] &&
4948 			    (CHAIN_GAIN_DIFF_INIT_VAL ==
4949 			    gain_diff_p->gain_diff_chain[i])) {
4950 
4951 				noise_diff = noise_aver[i] -
4952 				    noise_aver[min_noise_chain_n];
4953 				gain_diff_p->gain_diff_chain[i] =
4954 				    (uint8_t)((noise_diff * 10) / 15);
4955 
4956 				if (gain_diff_p->gain_diff_chain[i] > 3) {
4957 					gain_diff_p->gain_diff_chain[i] = 3;
4958 				}
4959 
4960 				gain_diff_p->gain_diff_chain[i] |= (1 << 2);
4961 			} else {
4962 				gain_diff_p->gain_diff_chain[i] = 0;
4963 			}
4964 		}
4965 
4966 		if (!gain_diff_p->gain_diff_send) {
4967 			gain_diff_p->gain_diff_send = 1;
4968 
4969 			(void) memset(&cmd, 0, sizeof (cmd));
4970 
4971 			cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
4972 			cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
4973 			cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
4974 			cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
4975 
4976 			/*
4977 			 * send out PHY calibration command to
4978 			 * adjust every chain's Rx gain
4979 			 */
4980 			rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
4981 			    &cmd, sizeof (cmd), 1);
4982 			if (rv) {
4983 				return (rv);
4984 			}
4985 
4986 			gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
4987 		}
4988 
4989 		gain_diff_p->beacon_stren_a = 0;
4990 		gain_diff_p->beacon_stren_b = 0;
4991 		gain_diff_p->beacon_stren_c = 0;
4992 
4993 		gain_diff_p->noise_stren_a = 0;
4994 		gain_diff_p->noise_stren_b = 0;
4995 		gain_diff_p->noise_stren_c = 0;
4996 	}
4997 
4998 	return (IWK_SUCCESS);
4999 }
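
/*
 * Example of the gain-difference encoding above (illustrative numbers):
 * if a connected chain's average noise exceeds the quietest chain's by 6,
 * the difference is scaled to (6 * 10) / 15 = 4, capped at 3, and OR'ed
 * with (1 << 2) to mark the value as valid, giving 7.
 */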
5000 
5001 /* Make necessary preparation for Receiver sensitivity calibration */
5002 static int iwk_rx_sens_init(iwk_sc_t *sc)
5003 {
5004 	int i, rv;
5005 	struct iwk_rx_sensitivity_cmd cmd;
5006 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5007 
5008 	(void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5009 	(void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5010 
5011 	rx_sens_p->auto_corr_ofdm_x4 = 90;
5012 	rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5013 	rx_sens_p->auto_corr_ofdm_x1 = 105;
5014 	rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5015 
5016 	rx_sens_p->auto_corr_cck_x4 = 125;
5017 	rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5018 	rx_sens_p->min_energy_det_cck = 100;
5019 
5020 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5021 	rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5022 	rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5023 
5024 	rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5025 	rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5026 	rx_sens_p->last_bad_plcp_cnt_cck = 0;
5027 	rx_sens_p->last_false_alarm_cnt_cck = 0;
5028 
5029 	rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5030 	rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5031 	rx_sens_p->cck_no_false_alarm_num = 0;
5032 	rx_sens_p->cck_beacon_idx = 0;
5033 
5034 	for (i = 0; i < 10; i++) {
5035 		rx_sens_p->cck_beacon_min[i] = 0;
5036 	}
5037 
5038 	rx_sens_p->cck_noise_idx = 0;
5039 	rx_sens_p->cck_noise_ref = 0;
5040 
5041 	for (i = 0; i < 20; i++) {
5042 		rx_sens_p->cck_noise_max[i] = 0;
5043 	}
5044 
5045 	rx_sens_p->cck_noise_diff = 0;
5046 	rx_sens_p->cck_no_false_alarm_num = 0;
5047 
5048 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5049 
5050 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5051 	    rx_sens_p->auto_corr_ofdm_x4;
5052 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5053 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5054 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5055 	    rx_sens_p->auto_corr_ofdm_x1;
5056 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5057 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5058 
5059 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5060 	    rx_sens_p->auto_corr_cck_x4;
5061 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5062 	    rx_sens_p->auto_corr_mrc_cck_x4;
5063 	cmd.table[MIN_ENERGY_CCK_DET_IDX] = rx_sens_p->min_energy_det_cck;
5064 
5065 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5066 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5067 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5068 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5069 
5070 	/* at first, set up Rx to maximum sensitivity */
5071 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5072 	if (rv) {
5073 		cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5074 		    "in the process of initialization, "
5075 		    "failed to send rx sensitivity command\n");
5076 		return (rv);
5077 	}
5078 
5079 	rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5080 
5081 	return (IWK_SUCCESS);
5082 }
5083 
5084 /*
5085  * perform receiver sensitivity calibration to adjust every chain's Rx
5086  * sensitivity. For more information, please refer to iwk_calibration.h
5087  */
5088 static int iwk_rx_sens(iwk_sc_t *sc)
5089 {
5090 	int rv;
5091 	uint32_t actual_rx_time;
5092 	struct statistics_rx_non_phy *rx_general_p =
5093 	    &sc->sc_statistics.rx.general;
5094 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5095 	struct iwk_rx_sensitivity_cmd cmd;
5096 
5097 	if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5098 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5099 		    "sensitivity initialization has not finished.\n");
5100 		return (DDI_FAILURE);
5101 	}
5102 
5103 	if (INTERFERENCE_DATA_AVAILABLE !=
5104 	    rx_general_p->interference_data_flag) {
5105 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5106 		    "can't make rx sensitivity calibration,"
5107 		    "because of invalid statistics\n");
5108 		return (DDI_FAILURE);
5109 	}
5110 
5111 	actual_rx_time = rx_general_p->channel_load;
5112 	if (!actual_rx_time) {
5113 		IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5114 		    "can't make rx sensitivity calibration,"
5115 		    "because has not enough rx time\n"));
5116 		return (DDI_FAILURE);
5117 	}
5118 
5119 	/* make Rx sensitivity calibration for OFDM mode */
5120 	rv = iwk_ofdm_sens(sc, actual_rx_time);
5121 	if (rv) {
5122 		return (rv);
5123 	}
5124 
5125 	/* make Rx sensitivity calibration for CCK mode */
5126 	rv = iwk_cck_sens(sc, actual_rx_time);
5127 	if (rv) {
5128 		return (rv);
5129 	}
5130 
5131 	/*
5132 	 * if the false alarm counts have not changed, nothing needs to be done
5133 	 */
5134 	if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5135 	    (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5136 		return (IWK_SUCCESS);
5137 	}
5138 
5139 	cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5140 
5141 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5142 	    rx_sens_p->auto_corr_ofdm_x4;
5143 	cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5144 	    rx_sens_p->auto_corr_mrc_ofdm_x4;
5145 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5146 	    rx_sens_p->auto_corr_ofdm_x1;
5147 	cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5148 	    rx_sens_p->auto_corr_mrc_ofdm_x1;
5149 
5150 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5151 	    rx_sens_p->auto_corr_cck_x4;
5152 	cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5153 	    rx_sens_p->auto_corr_mrc_cck_x4;
5154 	cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5155 	    rx_sens_p->min_energy_det_cck;
5156 
5157 	cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5158 	cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5159 	cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5160 	cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5161 
5162 	/*
5163 	 * send sensitivity command to complete actual sensitivity calibration
5164 	 */
5165 	rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5166 	if (rv) {
5167 		cmn_err(CE_WARN, "iwk_rx_sens(): "
5168 		    "fail to send rx sensitivity command\n");
5169 		return (rv);
5170 	}
5171 
5172 	return (IWK_SUCCESS);
5173 }
5175 
5176 /*
5177  * perform Rx sensitivity calibration for CCK mode.
5178  * This prepares the parameters for the SENSITIVITY command
5179  */
5180 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5181 {
5182 	int i;
5183 	uint8_t noise_a, noise_b, noise_c;
5184 	uint8_t max_noise_abc, max_noise_20;
5185 	uint32_t beacon_a, beacon_b, beacon_c;
5186 	uint32_t min_beacon_abc, max_beacon_10;
5187 	uint32_t cck_fa, cck_bp;
5188 	uint32_t cck_sum_fa_bp;
5189 	uint32_t temp;
5190 	struct statistics_rx_non_phy *rx_general_p =
5191 	    &sc->sc_statistics.rx.general;
5192 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5193 
5194 	cck_fa = sc->sc_statistics.rx.cck.false_alarm_cnt;
5195 	cck_bp = sc->sc_statistics.rx.cck.plcp_err;
5196 
5197 	/* accumulate false alarm */
5198 	if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5199 		temp = rx_sens_p->last_false_alarm_cnt_cck;
5200 		rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5201 		cck_fa += (0xFFFFFFFF - temp);
5202 	} else {
5203 		cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5204 		rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5205 	}
5206 
5207 	/* accumulate bad plcp */
5208 	if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5209 		temp = rx_sens_p->last_bad_plcp_cnt_cck;
5210 		rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5211 		cck_bp += (0xFFFFFFFF - temp);
5212 	} else {
5213 		cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5214 		rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5215 	}
5216 
5217 	/*
5218 	 * calculate relative value
5219 	 */
5220 	cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
5221 	rx_sens_p->cck_noise_diff = 0;
5222 
5223 	noise_a =
5224 	    (uint8_t)((rx_general_p->beacon_silence_rssi_a & 0xFF00) >> 8);
5225 	noise_b =
5226 	    (uint8_t)((rx_general_p->beacon_silence_rssi_b & 0xFF00) >> 8);
5227 	noise_c =
5228 	    (uint8_t)((rx_general_p->beacon_silence_rssi_c & 0xFF00) >> 8);
5229 
5230 	beacon_a = rx_general_p->beacon_energy_a;
5231 	beacon_b = rx_general_p->beacon_energy_b;
5232 	beacon_c = rx_general_p->beacon_energy_c;
5233 
5234 	/* determine maximum noise among 3 chains */
5235 	if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5236 		max_noise_abc = noise_a;
5237 	} else if (noise_b >= noise_c) {
5238 		max_noise_abc = noise_b;
5239 	} else {
5240 		max_noise_abc = noise_c;
5241 	}
5242 
5243 	/* record maximum noise among 3 chains */
5244 	rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5245 	rx_sens_p->cck_noise_idx++;
5246 	if (rx_sens_p->cck_noise_idx >= 20) {
5247 		rx_sens_p->cck_noise_idx = 0;
5248 	}
5249 
5250 	/* determine maximum noise among 20 max noise */
5251 	max_noise_20 = rx_sens_p->cck_noise_max[0];
5252 	for (i = 0; i < 20; i++) {
5253 		if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5254 			max_noise_20 = rx_sens_p->cck_noise_max[i];
5255 		}
5256 	}
5257 
5258 	/* determine minimum beacon among 3 chains */
5259 	if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5260 		min_beacon_abc = beacon_a;
5261 	} else if (beacon_b <= beacon_c) {
5262 		min_beacon_abc = beacon_b;
5263 	} else {
5264 		min_beacon_abc = beacon_c;
5265 	}
5266 
5267 	/* record minimum beacon among 3 chains */
5268 	rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5269 	rx_sens_p->cck_beacon_idx++;
5270 	if (rx_sens_p->cck_beacon_idx >= 10) {
5271 		rx_sens_p->cck_beacon_idx = 0;
5272 	}
5273 
5274 	/* determine the maximum among the last 10 recorded minimum beacons */
5275 	max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5276 	for (i = 0; i < 10; i++) {
5277 		if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5278 			max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5279 		}
5280 	}
5281 
5282 	/* add a little margin */
5283 	max_beacon_10 += 6;
5284 
5285 	/* record the count of having no false alarms */
5286 	if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5287 		rx_sens_p->cck_no_false_alarm_num++;
5288 	} else {
5289 		rx_sens_p->cck_no_false_alarm_num = 0;
5290 	}
5291 
5292 	/*
5293 	 * adjust parameters in the sensitivity command
5294 	 * according to the current state.
5295 	 * For more information, please refer to the iwk_calibration.h file
5296 	 */
5297 	if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5298 		rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5299 
5300 		if (rx_sens_p->auto_corr_cck_x4 > 160) {
5301 			rx_sens_p->cck_noise_ref = max_noise_20;
5302 
5303 			if (rx_sens_p->min_energy_det_cck > 2) {
5304 				rx_sens_p->min_energy_det_cck -= 2;
5305 			}
5306 		}
5307 
5308 		if (rx_sens_p->auto_corr_cck_x4 < 160) {
5309 			rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5310 		} else {
5311 			if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5312 				rx_sens_p->auto_corr_cck_x4 += 3;
5313 			} else {
5314 				rx_sens_p->auto_corr_cck_x4 = 200;
5315 			}
5316 		}
5317 
5318 		if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5319 			rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5320 		} else {
5321 			rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5322 		}
5323 
5324 		rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5325 
5326 	} else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5327 		rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5328 
5329 		rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5330 		    (int32_t)max_noise_20;
5331 
5332 		if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5333 		    ((rx_sens_p->cck_noise_diff > 2) ||
5334 		    (rx_sens_p->cck_no_false_alarm_num > 100))) {
5335 			if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5336 				rx_sens_p->min_energy_det_cck += 2;
5337 			} else {
5338 				rx_sens_p->min_energy_det_cck = 97;
5339 			}
5340 
5341 			if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5342 				rx_sens_p->auto_corr_cck_x4 -= 3;
5343 			} else {
5344 				rx_sens_p->auto_corr_cck_x4 = 125;
5345 			}
5346 
5347 			if ((rx_sens_p->auto_corr_mrc_cck_x4 - 3) > 200) {
5348 				rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5349 			} else {
5350 				rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5351 			}
5352 
5353 			rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5354 		} else {
5355 			rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5356 		}
5357 	} else {
5358 		rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5359 
5360 		rx_sens_p->cck_noise_ref = max_noise_20;
5361 
5362 		if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5363 			rx_sens_p->min_energy_det_cck -= 8;
5364 		}
5365 
5366 		rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5367 	}
5368 
5369 	if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5370 		rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5371 	}
5372 
5373 	rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5374 
5375 	return (IWK_SUCCESS);
5376 }
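
/*
 * Note on the false alarm accumulation used above (and in iwk_ofdm_sens()
 * below): the uCode counters are 32-bit and may wrap, so when the last
 * recorded value is larger than the new reading the delta is taken as
 * new + (0xFFFFFFFF - last).  For example (illustrative values), with
 * last = 0xFFFFFFF0 and a new reading of 0x10 the accumulated delta
 * becomes 0x1F.
 */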
5377 
5378 /*
5379  * perform Rx sensitivity calibration for OFDM mode.
5380  * This prepares the parameters for the SENSITIVITY command
5381  */
5382 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5383 {
5384 	uint32_t temp;
5385 	uint16_t temp1;
5386 	uint32_t ofdm_fa, ofdm_bp;
5387 	uint32_t ofdm_sum_fa_bp;
5388 	struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5389 
5390 	ofdm_fa = sc->sc_statistics.rx.ofdm.false_alarm_cnt;
5391 	ofdm_bp = sc->sc_statistics.rx.ofdm.plcp_err;
5392 
5393 	/* accumulate false alarm */
5394 	if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5395 		temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5396 		rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5397 		ofdm_fa += (0xFFFFFFFF - temp);
5398 	} else {
5399 		ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5400 		rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5401 	}
5402 
5403 	/* accumulate bad plcp */
5404 	if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5405 		temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5406 		rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5407 		ofdm_bp += (0xFFFFFFFF - temp);
5408 	} else {
5409 		ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5410 		rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5411 	}
5412 
5413 	ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5414 
5415 	/*
5416 	 * adjust parameters in the sensitivity command according to the current state
5417 	 */
5418 	if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5419 		temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5420 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5421 
5422 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5423 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5424 		    (temp1 <= 210) ? temp1 : 210;
5425 
5426 		temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5427 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5428 
5429 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5430 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5431 		    (temp1 <= 270) ? temp1 : 270;
5432 
5433 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5434 
5435 	} else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5436 		temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5437 		rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5438 
5439 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5440 		rx_sens_p->auto_corr_mrc_ofdm_x4 =
5441 		    (temp1 >= 170) ? temp1 : 170;
5442 
5443 		temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5444 		rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5445 
5446 		temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5447 		rx_sens_p->auto_corr_mrc_ofdm_x1 =
5448 		    (temp1 >= 220) ? temp1 : 220;
5449 
5450 		rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5451 
5452 	} else {
5453 		rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5454 	}
5455 
5456 	return (IWK_SUCCESS);
5457 }
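
/*
 * Illustrative sketch only -- not called anywhere in this driver.  The
 * wrap-safe delta computation used twice in iwk_ofdm_sens() above (and
 * by the CCK path) could be factored into a helper like this; the name
 * iwk_delta32 is hypothetical.
 */
static uint32_t iwk_delta32(uint32_t *last, uint32_t curr)
{
	uint32_t delta;

	if (*last > curr) {
		/* the cumulative counter wrapped since the last reading */
		delta = curr + (0xFFFFFFFF - *last);
	} else {
		delta = curr - *last;
	}
	*last = curr;

	return (delta);
}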
5458 
5459 /*
5460  * 1)  log_event_table_ptr indicates base of the event log.  This traces
5461  *     a 256-entry history of uCode execution within a circular buffer.
5462  *     Its header format is:
5463  *
5464  *	uint32_t log_size;	log capacity (in number of entries)
5465  *	uint32_t type;	(1) timestamp with each entry, (0) no timestamp
5466  *	uint32_t wraps;	# times uCode has wrapped to top of circular buffer
5467  *      uint32_t write_index;	next circular buffer entry that uCode would fill
5468  *
5469  *     The header is followed by the circular buffer of log entries.  Entries
5470  *     with timestamps have the following format:
5471  *
5472  *	uint32_t event_id;     range 0 - 1500
5473  *	uint32_t timestamp;    low 32 bits of TSF (of network, if associated)
5474  *	uint32_t data;         event_id-specific data value
5475  *
5476  *     Entries without timestamps contain only event_id and data.
5477  */
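
/*
 * For illustration only: the header and entry layouts described above,
 * expressed as C structures.  These types are not part of the firmware
 * interface headers and are not used by iwk_write_event_log() below,
 * which walks the table one 32-bit word at a time with iwk_mem_read().
 */
struct iwk_event_log_hdr_sketch {
	uint32_t log_size;	/* log capacity (number of entries) */
	uint32_t type;		/* 1: timestamped entries, 0: no timestamp */
	uint32_t wraps;		/* # times uCode wrapped the buffer */
	uint32_t write_index;	/* next entry that uCode will fill */
};

struct iwk_event_log_entry_sketch {
	uint32_t event_id;	/* range 0 - 1500 */
	uint32_t timestamp;	/* low 32 bits of TSF; absent if type == 0 */
	uint32_t data;		/* event_id-specific data value */
};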
5478 
5479 /*
5480  * iwk_write_event_log - Write event log to dmesg
5481  */
5482 static void iwk_write_event_log(iwk_sc_t *sc)
5483 {
5484 	uint32_t log_event_table_ptr;	/* Start address of event table */
5485 	uint32_t startptr;	/* Start address of log data */
5486 	uint32_t logptr;	/* address of log data entry */
5487 	uint32_t i, n, num_events;
5488 	uint32_t event_id, data1, data2; /* log data */
5489 
5490 	uint32_t log_size;   /* log capacity (in number of entries) */
5491 	uint32_t type;	/* (1)timestamp with each entry,(0) no timestamp */
5492 	uint32_t wraps;	/* # times uCode has wrapped to */
5493 			/* the top of circular buffer */
5494 	uint32_t idx; /* index of entry to be filled in next */
5495 
5496 	log_event_table_ptr = sc->sc_card_alive_run.log_event_table_ptr;
5497 	if (!(log_event_table_ptr)) {
5498 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5499 		return;
5500 	}
5501 
5502 	iwk_mac_access_enter(sc);
5503 
5504 	/* Read log header */
5505 	log_size = iwk_mem_read(sc, log_event_table_ptr);
5506 	log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5507 	type = iwk_mem_read(sc, log_event_table_ptr);
5508 	log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5509 	wraps = iwk_mem_read(sc, log_event_table_ptr);
5510 	log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5511 	idx = iwk_mem_read(sc, log_event_table_ptr);
5512 	startptr = log_event_table_ptr +
5513 	    sizeof (uint32_t); /* addr of start of log data */
5514 	if (!log_size && !wraps) {
5515 		IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5516 		iwk_mac_access_exit(sc);
5517 		return;
5518 	}
5519 
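	/*
	 * If the log has not wrapped, only entries 0 .. idx-1 are valid and
	 * reading starts at the top of the buffer.  Once it has wrapped,
	 * the oldest entries run from the write index to the end of the
	 * buffer; those are dumped first, and the newer entries at the top
	 * of the buffer are dumped by the wrap loop further below.
	 */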
5520 	if (!wraps) {
5521 		num_events = idx;
5522 		logptr = startptr;
5523 	} else {
5524 		num_events = log_size - idx;
5525 		n = type ? 3 : 2; /* words per entry: 3 w/ timestamp, 2 w/o */
5526 		logptr = startptr + (idx * n * sizeof (uint32_t));
5527 	}
5528 
5529 	for (i = 0; i < num_events; i++) {
5530 		event_id = iwk_mem_read(sc, logptr);
5531 		logptr += sizeof (uint32_t);
5532 		data1 = iwk_mem_read(sc, logptr);
5533 		logptr += sizeof (uint32_t);
5534 		if (type == 0) { /* no timestamp */
5535 			IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x",
5536 			    event_id, data1));
5537 		} else { /* timestamp */
5538 			data2 = iwk_mem_read(sc, logptr);
5539 			printf("Time=%d, Event ID=%d, Data=0x%x\n",
5540 			    data1, event_id, data2);
5541 			IWK_DBG((IWK_DEBUG_EEPROM,
5542 			    "Time=%d, Event ID=%d, Data=0x%x\n",
5543 			    data1, event_id, data2));
5544 			logptr += sizeof (uint32_t);
5545 		}
5546 	}
5547 
5548 	/*
5549 	 * Print the wrapped around entries, if any
5550 	 */
5551 	if (wraps) {
5552 		logptr = startptr;
5553 		for (i = 0; i < idx; i++) {
5554 			event_id = iwk_mem_read(sc, logptr);
5555 			logptr += sizeof (uint32_t);
5556 			data1 = iwk_mem_read(sc, logptr);
5557 			logptr += sizeof (uint32_t);
5558 			if (type == 0) { /* no timestamp */
5559 				IWK_DBG((IWK_DEBUG_EEPROM,
5560 				    "Event ID=%d, Data=0x%x", event_id, data1));
5561 			} else { /* timestamp */
5562 				data2 = iwk_mem_read(sc, logptr);
5563 				IWK_DBG((IWK_DEBUG_EEPROM,
5564 				    "Time = %d, Event ID=%d, Data=0x%x\n",
5565 				    data1, event_id, data2));
5566 				logptr += sizeof (uint32_t);
5567 			}
5568 		}
5569 	}
5570 
5571 	iwk_mac_access_exit(sc);
5572 }
5573 
5574 /*
5575  * error_event_table_ptr indicates base of the error log.  This contains
5576  * information about any uCode error that occurs.  For 4965, the format is:
5577  *
5578  * uint32_t valid;        (nonzero) valid, (0) log is empty
5579  * uint32_t error_id;     type of error
5580  * uint32_t pc;           program counter
5581  * uint32_t blink1;       branch link
5582  * uint32_t blink2;       branch link
5583  * uint32_t ilink1;       interrupt link
5584  * uint32_t ilink2;       interrupt link
5585  * uint32_t data1;        error-specific data
5586  * uint32_t data2;        error-specific data
5587  * uint32_t line;         source code line of error
5588  * uint32_t bcon_time;    beacon timer
5589  * uint32_t tsf_low;      TSF (timing sync function) timer, low 32 bits
5590  * uint32_t tsf_hi;       TSF (timing sync function) timer, high 32 bits
5591  */
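/*
 * iwk_write_error_log() below dumps this table by reading the thirteen
 * consecutive 32-bit words starting at error_event_table_ptr with
 * iwk_mem_read(), in the field order listed above; the dump is skipped
 * when the leading "valid" word is zero.
 */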
5592 /*
5593  * iwk_write_error_log - Write error log to dmesg
5594  */
5595 static void iwk_write_error_log(iwk_sc_t *sc)
5596 {
5597 	uint32_t err_ptr;	/* Start address of error log */
5598 	uint32_t valid;		/* is error log valid */
5599 
5600 	err_ptr = sc->sc_card_alive_run.error_event_table_ptr;
5601 	if (!(err_ptr)) {
5602 		IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
5603 		return;
5604 	}
5605 
5606 	iwk_mac_access_enter(sc);
5607 
5608 	valid = iwk_mem_read(sc, err_ptr);
5609 	if (!(valid)) {
5610 		IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
5611 		iwk_mac_access_exit(sc);
5612 		return;
5613 	}
5614 	err_ptr += sizeof (uint32_t);
5615 	IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
5616 	err_ptr += sizeof (uint32_t);
5617 	IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
5618 	err_ptr += sizeof (uint32_t);
5619 	IWK_DBG((IWK_DEBUG_EEPROM,
5620 	    "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5621 	err_ptr += sizeof (uint32_t);
5622 	IWK_DBG((IWK_DEBUG_EEPROM,
5623 	    "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5624 	err_ptr += sizeof (uint32_t);
5625 	IWK_DBG((IWK_DEBUG_EEPROM,
5626 	    "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
5627 	err_ptr += sizeof (uint32_t);
5628 	IWK_DBG((IWK_DEBUG_EEPROM,
5629 	    "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
5630 	err_ptr += sizeof (uint32_t);
5631 	IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
5632 	err_ptr += sizeof (uint32_t);
5633 	IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
5634 	err_ptr += sizeof (uint32_t);
5635 	IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
5636 	err_ptr += sizeof (uint32_t);
5637 	IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
5638 	err_ptr += sizeof (uint32_t);
5639 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
5640 	err_ptr += sizeof (uint32_t);
5641 	IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
5642 
5643 	iwk_mac_access_exit(sc);
5644 }
5645